commit 227ae31d859e638a44ce39b998df3d575b47b7e2
Author: Vihang Karajgaonkar
Date:   Wed Aug 15 17:01:58 2018 -0700

    HIVE-20306 : Implement projection spec for fetching only requested fields from partitions

diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 0ad2a2469e0330e050fdb8983078b80617afbbf1..f66db410e50214ce4e8203c344e6037745d89eb9 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -413,6 +413,13 @@ public Partition alterPartition(String catName, String dbName, String tblName, L
   }
 
   @Override
+  public List getPartitionSpecsByFilterAndProjection(Table table, boolean allowSql,
+      boolean allowJdo, List fieldList, String paramKeys, boolean excludeFlag)
+      throws MetaException, NoSuchObjectException {
+    return objectStore.getPartitionSpecsByFilterAndProjection(table, allowSql, allowJdo, fieldList, paramKeys, excludeFlag);
+  }
+
+  @Override
   public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
                                       String filter) throws MetaException, NoSuchObjectException {
     return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
index 7dcfc170cc86f237398aad1536c3fbd79561d5a9..51b33355e1eb82cfbd2e22522fe2d03b91039971 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
@@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnsRequest st
         case 1: // TXN_IDS
           if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
             {
-              org.apache.thrift.protocol.TList _list594 = iprot.readListBegin();
-              struct.txn_ids = new ArrayList(_list594.size);
-              long _elem595;
-              for (int _i596 = 0; _i596 < _list594.size; ++_i596)
+              org.apache.thrift.protocol.TList _list626 = iprot.readListBegin();
+              struct.txn_ids = new ArrayList(_list626.size);
+              long _elem627;
+              for (int _i628 = 0; _i628 < _list626.size; ++_i628)
               {
-                _elem595 = iprot.readI64();
-                struct.txn_ids.add(_elem595);
+                _elem627 = iprot.readI64();
+                struct.txn_ids.add(_elem627);
               }
               iprot.readListEnd();
             }
@@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnsRequest s
         oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size()));
-          for (long _iter597 : struct.txn_ids)
+          for (long _iter629 : struct.txn_ids)
           {
-            oprot.writeI64(_iter597);
+            oprot.writeI64(_iter629);
           }
           oprot.writeListEnd();
         }
@@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.txn_ids.size());
-        for (long _iter598 : struct.txn_ids)
+        for (long _iter630 : struct.txn_ids)
         {
-          oprot.writeI64(_iter598);
+          oprot.writeI64(_iter630);
         }
       }
     }
@@ -421,13 +421,13 @@ public void
write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list599.size); - long _elem600; - for (int _i601 = 0; _i601 < _list599.size; ++_i601) + org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list631.size); + long _elem632; + for (int _i633 = 0; _i633 < _list631.size; ++_i633) { - _elem600 = iprot.readI64(); - struct.txn_ids.add(_elem600); + _elem632 = iprot.readI64(); + struct.txn_ids.add(_elem632); } } struct.setTxn_idsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java index 8ece410555619626797770206e7d959cd6c31e6f..30e38a6de24e770207da9386c3331041982f2c3e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddCheckConstraintR case 1: // CHECK_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list416 = iprot.readListBegin(); - struct.checkConstraintCols = new ArrayList(_list416.size); - SQLCheckConstraint _elem417; - for (int _i418 = 0; _i418 < _list416.size; ++_i418) + org.apache.thrift.protocol.TList _list448 = iprot.readListBegin(); + struct.checkConstraintCols = new ArrayList(_list448.size); + SQLCheckConstraint _elem449; + for (int _i450 = 0; _i450 < _list448.size; ++_i450) { - _elem417 = new SQLCheckConstraint(); - _elem417.read(iprot); - struct.checkConstraintCols.add(_elem417); + _elem449 = new SQLCheckConstraint(); + _elem449.read(iprot); + struct.checkConstraintCols.add(_elem449); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddCheckConstraint oprot.writeFieldBegin(CHECK_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraintCols.size())); - for (SQLCheckConstraint _iter419 : struct.checkConstraintCols) + for (SQLCheckConstraint _iter451 : struct.checkConstraintCols) { - _iter419.write(oprot); + _iter451.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.checkConstraintCols.size()); - for (SQLCheckConstraint _iter420 : struct.checkConstraintCols) + for (SQLCheckConstraint _iter452 : struct.checkConstraintCols) { - _iter420.write(oprot); + _iter452.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintR public void read(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintRequest struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraintCols = new ArrayList(_list421.size); - SQLCheckConstraint _elem422; - for (int _i423 = 0; _i423 < _list421.size; ++_i423) + org.apache.thrift.protocol.TList _list453 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraintCols = new ArrayList(_list453.size); + SQLCheckConstraint _elem454; + for (int _i455 = 0; _i455 < _list453.size; ++_i455) { - _elem422 = new SQLCheckConstraint(); - _elem422.read(iprot); - struct.checkConstraintCols.add(_elem422); + _elem454 = new SQLCheckConstraint(); + _elem454.read(iprot); + struct.checkConstraintCols.add(_elem454); } } struct.setCheckConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java index 8a125d854edea766610eca8fe7a8a8f1ed3f5594..94017b1c3c7f19fafa27387ad0b7922745045a9b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDefaultConstrain case 1: // DEFAULT_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list408 = iprot.readListBegin(); - struct.defaultConstraintCols = new ArrayList(_list408.size); - SQLDefaultConstraint _elem409; - for (int _i410 = 0; _i410 < _list408.size; ++_i410) + org.apache.thrift.protocol.TList _list440 = iprot.readListBegin(); + struct.defaultConstraintCols = new ArrayList(_list440.size); + SQLDefaultConstraint _elem441; + for (int _i442 = 0; _i442 < _list440.size; ++_i442) { - _elem409 = new SQLDefaultConstraint(); - _elem409.read(iprot); - struct.defaultConstraintCols.add(_elem409); + _elem441 = new SQLDefaultConstraint(); + _elem441.read(iprot); + struct.defaultConstraintCols.add(_elem441); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDefaultConstrai oprot.writeFieldBegin(DEFAULT_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraintCols.size())); - for (SQLDefaultConstraint _iter411 : struct.defaultConstraintCols) + for (SQLDefaultConstraint _iter443 : struct.defaultConstraintCols) { - _iter411.write(oprot); + _iter443.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.defaultConstraintCols.size()); - for (SQLDefaultConstraint _iter412 : struct.defaultConstraintCols) + for (SQLDefaultConstraint _iter444 : struct.defaultConstraintCols) { - _iter412.write(oprot); + _iter444.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstrain public void read(org.apache.thrift.protocol.TProtocol prot, 
AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraintCols = new ArrayList(_list413.size); - SQLDefaultConstraint _elem414; - for (int _i415 = 0; _i415 < _list413.size; ++_i415) + org.apache.thrift.protocol.TList _list445 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraintCols = new ArrayList(_list445.size); + SQLDefaultConstraint _elem446; + for (int _i447 = 0; _i447 < _list445.size; ++_i447) { - _elem414 = new SQLDefaultConstraint(); - _elem414.read(iprot); - struct.defaultConstraintCols.add(_elem414); + _elem446 = new SQLDefaultConstraint(); + _elem446.read(iprot); + struct.defaultConstraintCols.add(_elem446); } } struct.setDefaultConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 45618e781cea1a8090a75dfadb56262041e3a0cd..9f4b4be756c34fe119311749717204aba600b1cc 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 5: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list716.size); - String _elem717; - for (int _i718 = 0; _i718 < _list716.size; ++_i718) + org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list748.size); + String _elem749; + for (int _i750 = 0; _i750 < _list748.size; ++_i750) { - _elem717 = iprot.readString(); - struct.partitionnames.add(_elem717); + _elem749 = iprot.readString(); + struct.partitionnames.add(_elem749); } iprot.readListEnd(); } @@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter719 : struct.partitionnames) + for (String _iter751 : struct.partitionnames) { - oprot.writeString(_iter719); + oprot.writeString(_iter751); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter720 : struct.partitionnames) + for (String _iter752 : struct.partitionnames) { - oprot.writeString(_iter720); + oprot.writeString(_iter752); } } BitSet optionals = new BitSet(); @@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - 
struct.partitionnames = new ArrayList(_list721.size); - String _elem722; - for (int _i723 = 0; _i723 < _list721.size; ++_i723) + org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list753.size); + String _elem754; + for (int _i755 = 0; _i755 < _list753.size; ++_i755) { - _elem722 = iprot.readString(); - struct.partitionnames.add(_elem722); + _elem754 = iprot.readString(); + struct.partitionnames.add(_elem754); } } struct.setPartitionnamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java index 0f443d4f60ae6b2f55ea5d1902708d005b53cac0..6ace391f8b835fd9b2d8a5990fe94f44a53cefad 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddForeignKeyReques case 1: // FOREIGN_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list384 = iprot.readListBegin(); - struct.foreignKeyCols = new ArrayList(_list384.size); - SQLForeignKey _elem385; - for (int _i386 = 0; _i386 < _list384.size; ++_i386) + org.apache.thrift.protocol.TList _list416 = iprot.readListBegin(); + struct.foreignKeyCols = new ArrayList(_list416.size); + SQLForeignKey _elem417; + for (int _i418 = 0; _i418 < _list416.size; ++_i418) { - _elem385 = new SQLForeignKey(); - _elem385.read(iprot); - struct.foreignKeyCols.add(_elem385); + _elem417 = new SQLForeignKey(); + _elem417.read(iprot); + struct.foreignKeyCols.add(_elem417); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddForeignKeyReque oprot.writeFieldBegin(FOREIGN_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeyCols.size())); - for (SQLForeignKey _iter387 : struct.foreignKeyCols) + for (SQLForeignKey _iter419 : struct.foreignKeyCols) { - _iter387.write(oprot); + _iter419.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeyCols.size()); - for (SQLForeignKey _iter388 : struct.foreignKeyCols) + for (SQLForeignKey _iter420 : struct.foreignKeyCols) { - _iter388.write(oprot); + _iter420.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeyCols = new ArrayList(_list389.size); - SQLForeignKey _elem390; - for (int _i391 = 0; _i391 < _list389.size; ++_i391) + org.apache.thrift.protocol.TList _list421 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeyCols = new ArrayList(_list421.size); + SQLForeignKey _elem422; + for (int _i423 = 0; _i423 < _list421.size; ++_i423) { - _elem390 = new SQLForeignKey(); - _elem390.read(iprot); - struct.foreignKeyCols.add(_elem390); + _elem422 = new SQLForeignKey(); + _elem422.read(iprot); + struct.foreignKeyCols.add(_elem422); } } struct.setForeignKeyColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java index 0266bba2b0b9c347bc555ee7b40b4c2794e3ad41..4c4e6788790313bee47cd7ac0f5eda0f03f6770c 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddNotNullConstrain case 1: // NOT_NULL_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list400 = iprot.readListBegin(); - struct.notNullConstraintCols = new ArrayList(_list400.size); - SQLNotNullConstraint _elem401; - for (int _i402 = 0; _i402 < _list400.size; ++_i402) + org.apache.thrift.protocol.TList _list432 = iprot.readListBegin(); + struct.notNullConstraintCols = new ArrayList(_list432.size); + SQLNotNullConstraint _elem433; + for (int _i434 = 0; _i434 < _list432.size; ++_i434) { - _elem401 = new SQLNotNullConstraint(); - _elem401.read(iprot); - struct.notNullConstraintCols.add(_elem401); + _elem433 = new SQLNotNullConstraint(); + _elem433.read(iprot); + struct.notNullConstraintCols.add(_elem433); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddNotNullConstrai oprot.writeFieldBegin(NOT_NULL_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraintCols.size())); - for (SQLNotNullConstraint _iter403 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter435 : struct.notNullConstraintCols) { - _iter403.write(oprot); + _iter435.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraintCols.size()); - for (SQLNotNullConstraint _iter404 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter436 : struct.notNullConstraintCols) { - _iter404.write(oprot); + _iter436.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain public void read(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraintCols = new ArrayList(_list405.size); - SQLNotNullConstraint _elem406; - for (int _i407 = 0; _i407 < _list405.size; ++_i407) + org.apache.thrift.protocol.TList _list437 = 
new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraintCols = new ArrayList(_list437.size); + SQLNotNullConstraint _elem438; + for (int _i439 = 0; _i439 < _list437.size; ++_i439) { - _elem406 = new SQLNotNullConstraint(); - _elem406.read(iprot); - struct.notNullConstraintCols.add(_elem406); + _elem438 = new SQLNotNullConstraint(); + _elem438.read(iprot); + struct.notNullConstraintCols.add(_elem438); } } struct.setNotNullConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 469a9a804171321e55ace6764c2cf3355f0778a9..74ecb533950fd32293664f82bd25bc369405b898 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -866,14 +866,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques case 3: // PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list490 = iprot.readListBegin(); - struct.parts = new ArrayList(_list490.size); - Partition _elem491; - for (int _i492 = 0; _i492 < _list490.size; ++_i492) + org.apache.thrift.protocol.TList _list522 = iprot.readListBegin(); + struct.parts = new ArrayList(_list522.size); + Partition _elem523; + for (int _i524 = 0; _i524 < _list522.size; ++_i524) { - _elem491 = new Partition(); - _elem491.read(iprot); - struct.parts.add(_elem491); + _elem523 = new Partition(); + _elem523.read(iprot); + struct.parts.add(_elem523); } iprot.readListEnd(); } @@ -941,9 +941,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldBegin(PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size())); - for (Partition _iter493 : struct.parts) + for (Partition _iter525 : struct.parts) { - _iter493.write(oprot); + _iter525.write(oprot); } oprot.writeListEnd(); } @@ -992,9 +992,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques oprot.writeString(struct.tblName); { oprot.writeI32(struct.parts.size()); - for (Partition _iter494 : struct.parts) + for (Partition _iter526 : struct.parts) { - _iter494.write(oprot); + _iter526.write(oprot); } } oprot.writeBool(struct.ifNotExists); @@ -1028,14 +1028,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.parts = new ArrayList(_list495.size); - Partition _elem496; - for (int _i497 = 0; _i497 < _list495.size; ++_i497) + org.apache.thrift.protocol.TList _list527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parts = new ArrayList(_list527.size); + Partition _elem528; + for (int _i529 = 0; _i529 < _list527.size; ++_i529) { - _elem496 = new Partition(); - _elem496.read(iprot); - struct.parts.add(_elem496); + _elem528 = new Partition(); + _elem528.read(iprot); + 
struct.parts.add(_elem528); } } struct.setPartsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index a00af609d45e3f4eb055e3664a376510aec926ae..63d37acc3f745c4ad3c15aa1948a6f11fa2c2749 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -426,14 +426,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list482 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list482.size); - Partition _elem483; - for (int _i484 = 0; _i484 < _list482.size; ++_i484) + org.apache.thrift.protocol.TList _list514 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list514.size); + Partition _elem515; + for (int _i516 = 0; _i516 < _list514.size; ++_i516) { - _elem483 = new Partition(); - _elem483.read(iprot); - struct.partitions.add(_elem483); + _elem515 = new Partition(); + _elem515.read(iprot); + struct.partitions.add(_elem515); } iprot.readListEnd(); } @@ -468,9 +468,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter485 : struct.partitions) + for (Partition _iter517 : struct.partitions) { - _iter485.write(oprot); + _iter517.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter486 : struct.partitions) + for (Partition _iter518 : struct.partitions) { - _iter486.write(oprot); + _iter518.write(oprot); } } } @@ -527,14 +527,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list487.size); - Partition _elem488; - for (int _i489 = 0; _i489 < _list487.size; ++_i489) + org.apache.thrift.protocol.TList _list519 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list519.size); + Partition _elem520; + for (int _i521 = 0; _i521 < _list519.size; ++_i521) { - _elem488 = new Partition(); - _elem488.read(iprot); - struct.partitions.add(_elem488); + _elem520 = new Partition(); + _elem520.read(iprot); + struct.partitions.add(_elem520); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java index 9069a419ec6f6d0fba3428e556f9cd815aa714cb..58bec2cfed71800e03fb4f9042d5667fc7e26b3a 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPrimaryKeyReques case 1: // PRIMARY_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list376 = iprot.readListBegin(); - struct.primaryKeyCols = new ArrayList(_list376.size); - SQLPrimaryKey _elem377; - for (int _i378 = 0; _i378 < _list376.size; ++_i378) + org.apache.thrift.protocol.TList _list408 = iprot.readListBegin(); + struct.primaryKeyCols = new ArrayList(_list408.size); + SQLPrimaryKey _elem409; + for (int _i410 = 0; _i410 < _list408.size; ++_i410) { - _elem377 = new SQLPrimaryKey(); - _elem377.read(iprot); - struct.primaryKeyCols.add(_elem377); + _elem409 = new SQLPrimaryKey(); + _elem409.read(iprot); + struct.primaryKeyCols.add(_elem409); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPrimaryKeyReque oprot.writeFieldBegin(PRIMARY_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeyCols.size())); - for (SQLPrimaryKey _iter379 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter411 : struct.primaryKeyCols) { - _iter379.write(oprot); + _iter411.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeyCols.size()); - for (SQLPrimaryKey _iter380 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter412 : struct.primaryKeyCols) { - _iter380.write(oprot); + _iter412.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeyCols = new ArrayList(_list381.size); - SQLPrimaryKey _elem382; - for (int _i383 = 0; _i383 < _list381.size; ++_i383) + org.apache.thrift.protocol.TList _list413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeyCols = new ArrayList(_list413.size); + SQLPrimaryKey _elem414; + for (int _i415 = 0; _i415 < _list413.size; ++_i415) { - _elem382 = new SQLPrimaryKey(); - _elem382.read(iprot); - struct.primaryKeyCols.add(_elem382); + _elem414 = new SQLPrimaryKey(); + _elem414.read(iprot); + struct.primaryKeyCols.add(_elem414); } } struct.setPrimaryKeyColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java index c47db4a91082df50169388b3d58c067069b790b4..4ac6b386845711948ab0d84e2a10dc4d9e1fe3a4 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddUniqueConstraint case 1: // UNIQUE_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list392 = iprot.readListBegin(); - struct.uniqueConstraintCols = new ArrayList(_list392.size); - SQLUniqueConstraint _elem393; - for (int _i394 = 0; _i394 < _list392.size; ++_i394) + org.apache.thrift.protocol.TList _list424 = iprot.readListBegin(); + struct.uniqueConstraintCols = new ArrayList(_list424.size); + SQLUniqueConstraint _elem425; + for (int _i426 = 0; _i426 < _list424.size; ++_i426) { - _elem393 = new SQLUniqueConstraint(); - _elem393.read(iprot); - struct.uniqueConstraintCols.add(_elem393); + _elem425 = new SQLUniqueConstraint(); + _elem425.read(iprot); + struct.uniqueConstraintCols.add(_elem425); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddUniqueConstrain oprot.writeFieldBegin(UNIQUE_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraintCols.size())); - for (SQLUniqueConstraint _iter395 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter427 : struct.uniqueConstraintCols) { - _iter395.write(oprot); + _iter427.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.uniqueConstraintCols.size()); - for (SQLUniqueConstraint _iter396 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter428 : struct.uniqueConstraintCols) { - _iter396.write(oprot); + _iter428.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint public void read(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraintCols = new ArrayList(_list397.size); - SQLUniqueConstraint _elem398; - for (int _i399 = 0; _i399 < _list397.size; ++_i399) + org.apache.thrift.protocol.TList _list429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraintCols = new ArrayList(_list429.size); + SQLUniqueConstraint _elem430; + for (int _i431 = 0; _i431 < _list429.size; ++_i431) { - _elem398 = new SQLUniqueConstraint(); - _elem398.read(iprot); - struct.uniqueConstraintCols.add(_elem398); + _elem430 = new SQLUniqueConstraint(); + _elem430.read(iprot); + struct.uniqueConstraintCols.add(_elem430); } } struct.setUniqueConstraintColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index 67d9b1061477e85cc71a10c4b5cc1559ac4ee1dc..d76573217f20a56ae58e0dd8dc1aae4488b6ab04 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -516,14 +516,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) t case 1: // COL_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list284 = iprot.readListBegin(); - struct.colStats = new ArrayList(_list284.size); - ColumnStatisticsObj _elem285; - for (int _i286 = 0; _i286 < _list284.size; ++_i286) + org.apache.thrift.protocol.TList _list316 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list316.size); + ColumnStatisticsObj _elem317; + for (int _i318 = 0; _i318 < _list316.size; ++_i318) { - _elem285 = new ColumnStatisticsObj(); - _elem285.read(iprot); - struct.colStats.add(_elem285); + _elem317 = new ColumnStatisticsObj(); + _elem317.read(iprot); + struct.colStats.add(_elem317); } iprot.readListEnd(); } @@ -565,9 +565,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) oprot.writeFieldBegin(COL_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); - for (ColumnStatisticsObj _iter287 : struct.colStats) + for (ColumnStatisticsObj _iter319 : struct.colStats) { - _iter287.write(oprot); + _iter319.write(oprot); } oprot.writeListEnd(); } @@ -600,9 +600,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.colStats.size()); - for (ColumnStatisticsObj _iter288 : struct.colStats) + for (ColumnStatisticsObj _iter320 : struct.colStats) { - _iter288.write(oprot); + _iter320.write(oprot); } } oprot.writeI64(struct.partsFound); @@ -620,14 +620,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.colStats = new ArrayList(_list289.size); - ColumnStatisticsObj _elem290; - for (int _i291 = 0; _i291 < _list289.size; ++_i291) + org.apache.thrift.protocol.TList _list321 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list321.size); + ColumnStatisticsObj _elem322; + for (int _i323 = 0; _i323 < _list321.size; ++_i323) { - _elem290 = new ColumnStatisticsObj(); - _elem290.read(iprot); - struct.colStats.add(_elem290); + _elem322 = new ColumnStatisticsObj(); + _elem322.read(iprot); + struct.colStats.add(_elem322); } } struct.setColStatsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index 5fcb98fb1b48bbff79515a984724040c5e22e602..86fb68762379a2fd840126abf959598877d002ee 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -716,13 +716,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 3: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); - struct.txnIds = new ArrayList(_list642.size); - long _elem643; - for (int _i644 = 0; _i644 < _list642.size; ++_i644) + org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list674.size); + long _elem675; + for (int _i676 = 0; _i676 < _list674.size; ++_i676) { - _elem643 = iprot.readI64(); - struct.txnIds.add(_elem643); + _elem675 = iprot.readI64(); + struct.txnIds.add(_elem675); } iprot.readListEnd(); } @@ -742,14 +742,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 5: // SRC_TXN_TO_WRITE_ID_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list645 = iprot.readListBegin(); - struct.srcTxnToWriteIdList = new ArrayList(_list645.size); - TxnToWriteId _elem646; - for (int _i647 = 0; _i647 < _list645.size; ++_i647) + org.apache.thrift.protocol.TList _list677 = iprot.readListBegin(); + struct.srcTxnToWriteIdList = new ArrayList(_list677.size); + TxnToWriteId _elem678; + for (int _i679 = 0; _i679 < _list677.size; ++_i679) { - _elem646 = new TxnToWriteId(); - _elem646.read(iprot); - struct.srcTxnToWriteIdList.add(_elem646); + _elem678 = new TxnToWriteId(); + _elem678.read(iprot); + struct.srcTxnToWriteIdList.add(_elem678); } iprot.readListEnd(); } @@ -786,9 +786,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); - for (long _iter648 : struct.txnIds) + for (long _iter680 : struct.txnIds) { - oprot.writeI64(_iter648); + oprot.writeI64(_iter680); } oprot.writeListEnd(); } @@ -807,9 +807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size())); - for (TxnToWriteId _iter649 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter681 : struct.srcTxnToWriteIdList) { - _iter649.write(oprot); + _iter681.write(oprot); } oprot.writeListEnd(); } @@ -849,9 +849,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetTxnIds()) { { oprot.writeI32(struct.txnIds.size()); - for (long _iter650 : struct.txnIds) + for (long _iter682 : struct.txnIds) { - oprot.writeI64(_iter650); + oprot.writeI64(_iter682); } } } @@ -861,9 +861,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI if (struct.isSetSrcTxnToWriteIdList()) { { oprot.writeI32(struct.srcTxnToWriteIdList.size()); - for (TxnToWriteId _iter651 : struct.srcTxnToWriteIdList) + for (TxnToWriteId _iter683 : struct.srcTxnToWriteIdList) { - _iter651.write(oprot); + _iter683.write(oprot); } } } @@ -879,13 +879,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list652 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txnIds = new ArrayList(_list652.size); - long _elem653; - for (int _i654 = 0; _i654 < _list652.size; 
++_i654) + org.apache.thrift.protocol.TList _list684 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list684.size); + long _elem685; + for (int _i686 = 0; _i686 < _list684.size; ++_i686) { - _elem653 = iprot.readI64(); - struct.txnIds.add(_elem653); + _elem685 = iprot.readI64(); + struct.txnIds.add(_elem685); } } struct.setTxnIdsIsSet(true); @@ -896,14 +896,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteId } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.srcTxnToWriteIdList = new ArrayList(_list655.size); - TxnToWriteId _elem656; - for (int _i657 = 0; _i657 < _list655.size; ++_i657) + org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.srcTxnToWriteIdList = new ArrayList(_list687.size); + TxnToWriteId _elem688; + for (int _i689 = 0; _i689 < _list687.size; ++_i689) { - _elem656 = new TxnToWriteId(); - _elem656.read(iprot); - struct.srcTxnToWriteIdList.add(_elem656); + _elem688 = new TxnToWriteId(); + _elem688.read(iprot); + struct.srcTxnToWriteIdList.add(_elem688); } } struct.setSrcTxnToWriteIdListIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java index 2a13eba708f177b26bbbd7455ce71101a33ea0b5..bb04c0b9749aede98de30069981ac43b4ece7a83 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_TO_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); - struct.txnToWriteIds = new ArrayList(_list658.size); - TxnToWriteId _elem659; - for (int _i660 = 0; _i660 < _list658.size; ++_i660) + org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list690.size); + TxnToWriteId _elem691; + for (int _i692 = 0; _i692 < _list690.size; ++_i692) { - _elem659 = new TxnToWriteId(); - _elem659.read(iprot); - struct.txnToWriteIds.add(_elem659); + _elem691 = new TxnToWriteId(); + _elem691.read(iprot); + struct.txnToWriteIds.add(_elem691); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); - for (TxnToWriteId _iter661 : struct.txnToWriteIds) + for (TxnToWriteId _iter693 : struct.txnToWriteIds) { - _iter661.write(oprot); + _iter693.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnToWriteIds.size()); - for (TxnToWriteId _iter662 : 
struct.txnToWriteIds) + for (TxnToWriteId _iter694 : struct.txnToWriteIds) { - _iter662.write(oprot); + _iter694.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.txnToWriteIds = new ArrayList(_list663.size); - TxnToWriteId _elem664; - for (int _i665 = 0; _i665 < _list663.size; ++_i665) + org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list695.size); + TxnToWriteId _elem696; + for (int _i697 = 0; _i697 < _list695.size; ++_i697) { - _elem664 = new TxnToWriteId(); - _elem664.read(iprot); - struct.txnToWriteIds.add(_elem664); + _elem696 = new TxnToWriteId(); + _elem696.read(iprot); + struct.txnToWriteIds.add(_elem696); } } struct.setTxnToWriteIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java index 4d4595a429b8437e187c05cd52795f9e4885f6c4..70b6e92d36987827dcb343f1d5f190a7c9b976ab 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java @@ -877,14 +877,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequ case 4: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list952.size); - Partition _elem953; - for (int _i954 = 0; _i954 < _list952.size; ++_i954) + org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list984.size); + Partition _elem985; + for (int _i986 = 0; _i986 < _list984.size; ++_i986) { - _elem953 = new Partition(); - _elem953.read(iprot); - struct.partitions.add(_elem953); + _elem985 = new Partition(); + _elem985.read(iprot); + struct.partitions.add(_elem985); } iprot.readListEnd(); } @@ -952,9 +952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsReq oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter955 : struct.partitions) + for (Partition _iter987 : struct.partitions) { - _iter955.write(oprot); + _iter987.write(oprot); } oprot.writeListEnd(); } @@ -1000,9 +1000,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partitions.size()); - for (Partition _iter956 : struct.partitions) + for (Partition _iter988 : struct.partitions) { - _iter956.write(oprot); + _iter988.write(oprot); } } BitSet optionals = new BitSet(); @@ -1041,14 +1041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsReque 
struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list957.size); - Partition _elem958; - for (int _i959 = 0; _i959 < _list957.size; ++_i959) + org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list989.size); + Partition _elem990; + for (int _i991 = 0; _i991 < _list989.size; ++_i991) { - _elem958 = new Partition(); - _elem958.read(iprot); - struct.partitions.add(_elem958); + _elem990 = new Partition(); + _elem990.read(iprot); + struct.partitions.add(_elem990); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java index 68582058ab159126dab71e68a665a81ba9781231..b6ca3d64f838e632d11b09db6cfec04c043282f7 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CheckConstraintsRes case 1: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list368 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list368.size); - SQLCheckConstraint _elem369; - for (int _i370 = 0; _i370 < _list368.size; ++_i370) + org.apache.thrift.protocol.TList _list400 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list400.size); + SQLCheckConstraint _elem401; + for (int _i402 = 0; _i402 < _list400.size; ++_i402) { - _elem369 = new SQLCheckConstraint(); - _elem369.read(iprot); - struct.checkConstraints.add(_elem369); + _elem401 = new SQLCheckConstraint(); + _elem401.read(iprot); + struct.checkConstraints.add(_elem401); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CheckConstraintsRe oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter371 : struct.checkConstraints) + for (SQLCheckConstraint _iter403 : struct.checkConstraints) { - _iter371.write(oprot); + _iter403.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter372 : struct.checkConstraints) + for (SQLCheckConstraint _iter404 : struct.checkConstraints) { - _iter372.write(oprot); + _iter404.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRes public void read(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list373 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list373.size); - SQLCheckConstraint _elem374; - for (int _i375 = 0; _i375 < _list373.size; ++_i375) + org.apache.thrift.protocol.TList _list405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list405.size); + SQLCheckConstraint _elem406; + for (int _i407 = 0; _i407 < _list405.size; ++_i407) { - _elem374 = new SQLCheckConstraint(); - _elem374.read(iprot); - struct.checkConstraints.add(_elem374); + _elem406 = new SQLCheckConstraint(); + _elem406.read(iprot); + struct.checkConstraints.add(_elem406); } } struct.setCheckConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index 3fdd295fb12240d7a94315a97419cc9a570c1536..1447bb489cf8c17c51a940ea70a3049ea058b67b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list832 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list832.size); - long _elem833; - for (int _i834 = 0; _i834 < _list832.size; ++_i834) + org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list864.size); + long _elem865; + for (int _i866 = 0; _i866 < _list864.size; ++_i866) { - _elem833 = iprot.readI64(); - struct.fileIds.add(_elem833); + _elem865 = iprot.readI64(); + struct.fileIds.add(_elem865); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter835 : struct.fileIds) + for (long _iter867 : struct.fileIds) { - oprot.writeI64(_iter835); + oprot.writeI64(_iter867); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter836 : struct.fileIds) + for (long _iter868 : struct.fileIds) { - oprot.writeI64(_iter836); + oprot.writeI64(_iter868); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list837 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list837.size); - long _elem838; - for (int _i839 = 0; _i839 < _list837.size; ++_i839) + org.apache.thrift.protocol.TList _list869 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list869.size); + long _elem870; + for (int _i871 = 0; _i871 < _list869.size; ++_i871) { - _elem838 = iprot.readI64(); - struct.fileIds.add(_elem838); + _elem870 = iprot.readI64(); + struct.fileIds.add(_elem870); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index f5c9582fa908e2d428cb897e7cc1cdd31bf45286..89c12d90cdf6b272710e2e3871e4e8ce2bb88a08 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.values = new ArrayList(_list848.size); - ClientCapability _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.values = new ArrayList(_list880.size); + ClientCapability _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) { - _elem849 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem849); + _elem881 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem881); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter851 : struct.values) + for (ClientCapability _iter883 : struct.values) { - oprot.writeI32(_iter851.getValue()); + oprot.writeI32(_iter883.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter852 : struct.values) + for (ClientCapability _iter884 : struct.values) { - oprot.writeI32(_iter852.getValue()); + oprot.writeI32(_iter884.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list853.size); - ClientCapability _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list885.size); + ClientCapability _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem854 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem854); + _elem886 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem886); } } struct.setValuesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index fd4619f0e5a9078e6e527e8f452a4d4e5da6a0b7..1eb6d1af029fc97d372969ebe32685165c57e921 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -532,14 +532,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st case 2: // STATS_OBJ if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list276 = iprot.readListBegin(); - struct.statsObj = new ArrayList(_list276.size); - ColumnStatisticsObj _elem277; - for (int _i278 = 0; _i278 < _list276.size; ++_i278) + org.apache.thrift.protocol.TList _list308 = iprot.readListBegin(); + struct.statsObj = new ArrayList(_list308.size); + ColumnStatisticsObj _elem309; + for (int _i310 = 0; _i310 < _list308.size; ++_i310) { - _elem277 = new ColumnStatisticsObj(); - _elem277.read(iprot); - struct.statsObj.add(_elem277); + _elem309 = new ColumnStatisticsObj(); + _elem309.read(iprot); + struct.statsObj.add(_elem309); } iprot.readListEnd(); } @@ -578,9 +578,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.statsObj.size())); - for (ColumnStatisticsObj _iter279 : struct.statsObj) + for (ColumnStatisticsObj _iter311 : struct.statsObj) { - _iter279.write(oprot); + _iter311.write(oprot); } oprot.writeListEnd(); } @@ -611,9 +611,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics st struct.statsDesc.write(oprot); { oprot.writeI32(struct.statsObj.size()); - for (ColumnStatisticsObj _iter280 : struct.statsObj) + for (ColumnStatisticsObj _iter312 : struct.statsObj) { - _iter280.write(oprot); + _iter312.write(oprot); } } BitSet optionals = new BitSet(); @@ -633,14 +633,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str struct.statsDesc.read(iprot); struct.setStatsDescIsSet(true); { - org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.statsObj = new ArrayList(_list281.size); - ColumnStatisticsObj _elem282; - for (int _i283 = 0; _i283 < _list281.size; ++_i283) + org.apache.thrift.protocol.TList _list313 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.statsObj = new ArrayList(_list313.size); + ColumnStatisticsObj _elem314; + for (int _i315 = 0; _i315 < _list313.size; ++_i315) { - _elem282 = new ColumnStatisticsObj(); - _elem282.read(iprot); - struct.statsObj.add(_elem282); + _elem314 = new ColumnStatisticsObj(); + _elem314.read(iprot); + struct.statsObj.add(_elem314); } } struct.setStatsObjIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java index db47f9db8b094a8f34e27241e8ce33943ac90617..30fe76fedcd40a7a39dfb69e5b1032d9d541ebd9 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java @@ -533,14 +533,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CommitTxnRequest st case 3: // WRITE_EVENT_INFOS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list602 = iprot.readListBegin(); - struct.writeEventInfos = new ArrayList(_list602.size); - WriteEventInfo _elem603; - for (int _i604 = 0; _i604 < _list602.size; ++_i604) + org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); + struct.writeEventInfos = new ArrayList(_list634.size); + WriteEventInfo _elem635; + for (int _i636 = 0; _i636 < _list634.size; ++_i636) { - _elem603 = new WriteEventInfo(); - _elem603.read(iprot); - struct.writeEventInfos.add(_elem603); + _elem635 = new WriteEventInfo(); + _elem635.read(iprot); + struct.writeEventInfos.add(_elem635); } iprot.readListEnd(); } @@ -577,9 +577,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CommitTxnRequest s oprot.writeFieldBegin(WRITE_EVENT_INFOS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.writeEventInfos.size())); - for (WriteEventInfo _iter605 : struct.writeEventInfos) + for (WriteEventInfo _iter637 : struct.writeEventInfos) { - _iter605.write(oprot); + _iter637.write(oprot); } oprot.writeListEnd(); } @@ -618,9 +618,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest st if (struct.isSetWriteEventInfos()) { { oprot.writeI32(struct.writeEventInfos.size()); - for (WriteEventInfo _iter606 : struct.writeEventInfos) + for (WriteEventInfo _iter638 : struct.writeEventInfos) { - _iter606.write(oprot); + _iter638.write(oprot); } } } @@ -638,14 +638,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest str } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list607 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.writeEventInfos = new ArrayList(_list607.size); - WriteEventInfo _elem608; - for (int _i609 = 0; _i609 < _list607.size; ++_i609) + org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.writeEventInfos = new ArrayList(_list639.size); + WriteEventInfo _elem640; + for (int _i641 = 0; _i641 < _list639.size; ++_i641) { - _elem608 = new WriteEventInfo(); - _elem608.read(iprot); - struct.writeEventInfos.add(_elem608); + _elem640 = new WriteEventInfo(); + _elem640.read(iprot); + struct.writeEventInfos.add(_elem640); } } struct.setWriteEventInfosIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 
dd6df744cb60f35cff5cb58e7679e4a2b8d19ffc..7d73a1ea7acf979ad963e41b5758b198c7902f3a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map698 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map698.size); - String _key699; - String _val700; - for (int _i701 = 0; _i701 < _map698.size; ++_i701) + org.apache.thrift.protocol.TMap _map730 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map730.size); + String _key731; + String _val732; + for (int _i733 = 0; _i733 < _map730.size; ++_i733) { - _key699 = iprot.readString(); - _val700 = iprot.readString(); - struct.properties.put(_key699, _val700); + _key731 = iprot.readString(); + _val732 = iprot.readString(); + struct.properties.put(_key731, _val732); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter702 : struct.properties.entrySet()) + for (Map.Entry _iter734 : struct.properties.entrySet()) { - oprot.writeString(_iter702.getKey()); - oprot.writeString(_iter702.getValue()); + oprot.writeString(_iter734.getKey()); + oprot.writeString(_iter734.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter703 : struct.properties.entrySet()) + for (Map.Entry _iter735 : struct.properties.entrySet()) { - oprot.writeString(_iter703.getKey()); - oprot.writeString(_iter703.getValue()); + oprot.writeString(_iter735.getKey()); + oprot.writeString(_iter735.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map704 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map704.size); - String _key705; - String _val706; - for (int _i707 = 0; _i707 < _map704.size; ++_i707) + org.apache.thrift.protocol.TMap _map736 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map736.size); + String _key737; + String _val738; + for (int _i739 = 0; _i739 < _map736.size; ++_i739) { - _key705 = iprot.readString(); - _val706 = iprot.readString(); - struct.properties.put(_key705, _val706); + _key737 = iprot.readString(); + _val738 = iprot.readString(); + struct.properties.put(_key737, _val738); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index d631e21cec66d519e376625e6c3f41d071716834..a4fe79f5d294911650860c65b2ac9eaaa63bfece 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -792,13 +792,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st case 4: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set724 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set724.size); - String _elem725; - for (int _i726 = 0; _i726 < _set724.size; ++_i726) + org.apache.thrift.protocol.TSet _set756 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set756.size); + String _elem757; + for (int _i758 = 0; _i758 < _set756.size; ++_i758) { - _elem725 = iprot.readString(); - struct.tablesUsed.add(_elem725); + _elem757 = iprot.readString(); + struct.tablesUsed.add(_elem757); } iprot.readSetEnd(); } @@ -855,9 +855,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter727 : struct.tablesUsed) + for (String _iter759 : struct.tablesUsed) { - oprot.writeString(_iter727); + oprot.writeString(_iter759); } oprot.writeSetEnd(); } @@ -897,9 +897,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st oprot.writeString(struct.tblName); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter728 : struct.tablesUsed) + for (String _iter760 : struct.tablesUsed) { - oprot.writeString(_iter728); + oprot.writeString(_iter760); } } BitSet optionals = new BitSet(); @@ -928,13 +928,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata str struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TSet _set729 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set729.size); - String _elem730; - for (int _i731 = 0; _i731 < _set729.size; ++_i731) + org.apache.thrift.protocol.TSet _set761 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set761.size); + String _elem762; + for (int _i763 = 0; _i763 < _set761.size; ++_i763) { - _elem730 = iprot.readString(); - struct.tablesUsed.add(_elem730); + _elem762 = iprot.readString(); + struct.tablesUsed.add(_elem762); } } struct.setTablesUsedIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java index e29932c5abe88217414c31c81d431dfc519e2969..5ee4ae9c3687ccaf14434687ee347a6e2ab4abe6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR case 1: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list360 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list360.size); - SQLDefaultConstraint _elem361; - for (int _i362 = 0; _i362 < _list360.size; ++_i362) + org.apache.thrift.protocol.TList _list392 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list392.size); + SQLDefaultConstraint _elem393; + for (int _i394 = 0; _i394 < _list392.size; ++_i394) { - _elem361 = new SQLDefaultConstraint(); - _elem361.read(iprot); - struct.defaultConstraints.add(_elem361); + _elem393 = new SQLDefaultConstraint(); + _elem393.read(iprot); + struct.defaultConstraints.add(_elem393); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraints oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter363 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter395 : struct.defaultConstraints) { - _iter363.write(oprot); + _iter395.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter364 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter396 : struct.defaultConstraints) { - _iter364.write(oprot); + _iter396.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list365.size); - SQLDefaultConstraint _elem366; - for (int _i367 = 0; _i367 < _list365.size; ++_i367) + org.apache.thrift.protocol.TList _list397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list397.size); + SQLDefaultConstraint _elem398; + for (int _i399 = 0; _i399 < _list397.size; ++_i399) { - _elem366 = new SQLDefaultConstraint(); - _elem366.read(iprot); - struct.defaultConstraints.add(_elem366); + _elem398 = new SQLDefaultConstraint(); + _elem398.read(iprot); + struct.defaultConstraints.add(_elem398); } } struct.setDefaultConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index 0a7d3b5bca893f7c673bc60fd16b655f362a33e2..b30ea90aef7f9b219cdd3d1b2bccc7548fad5e82 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResul case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list498 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list498.size); - Partition _elem499; - for (int _i500 = 0; _i500 < _list498.size; ++_i500) + org.apache.thrift.protocol.TList _list530 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list530.size); + Partition _elem531; + for (int _i532 = 0; _i532 < _list530.size; ++_i532) { - _elem499 = new Partition(); - _elem499.read(iprot); - struct.partitions.add(_elem499); + _elem531 = new Partition(); + _elem531.read(iprot); + struct.partitions.add(_elem531); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResu oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter501 : struct.partitions) + for (Partition _iter533 : struct.partitions) { - _iter501.write(oprot); + _iter533.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResul if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter502 : struct.partitions) + for (Partition _iter534 : struct.partitions) { - _iter502.write(oprot); + _iter534.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list503 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list503.size); - Partition _elem504; - for (int _i505 = 0; _i505 < _list503.size; ++_i505) + org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list535.size); + Partition _elem536; + for (int _i537 = 0; _i537 < _list535.size; ++_i537) { - _elem504 = new Partition(); - _elem504.read(iprot); - struct.partitions.add(_elem504); + _elem536 = new Partition(); + _elem536.read(iprot); + struct.partitions.add(_elem536); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java index a128dacd898ea548b535373ffdd87d8b5398056c..0c44db6e04db167b416eaa83f745df9b885d0984 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java @@ -344,15 +344,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, EnvironmentContext case 1: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map318 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map318.size); - 
String _key319; - String _val320; - for (int _i321 = 0; _i321 < _map318.size; ++_i321) + org.apache.thrift.protocol.TMap _map350 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map350.size); + String _key351; + String _val352; + for (int _i353 = 0; _i353 < _map350.size; ++_i353) { - _key319 = iprot.readString(); - _val320 = iprot.readString(); - struct.properties.put(_key319, _val320); + _key351 = iprot.readString(); + _val352 = iprot.readString(); + struct.properties.put(_key351, _val352); } iprot.readMapEnd(); } @@ -378,10 +378,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, EnvironmentContext oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter322 : struct.properties.entrySet()) + for (Map.Entry _iter354 : struct.properties.entrySet()) { - oprot.writeString(_iter322.getKey()); - oprot.writeString(_iter322.getValue()); + oprot.writeString(_iter354.getKey()); + oprot.writeString(_iter354.getValue()); } oprot.writeMapEnd(); } @@ -412,10 +412,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter323 : struct.properties.entrySet()) + for (Map.Entry _iter355 : struct.properties.entrySet()) { - oprot.writeString(_iter323.getKey()); - oprot.writeString(_iter323.getValue()); + oprot.writeString(_iter355.getKey()); + oprot.writeString(_iter355.getValue()); } } } @@ -427,15 +427,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext s BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map324 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map324.size); - String _key325; - String _val326; - for (int _i327 = 0; _i327 < _map324.size; ++_i327) + org.apache.thrift.protocol.TMap _map356 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map356.size); + String _key357; + String _val358; + for (int _i359 = 0; _i359 < _map356.size; ++_i359) { - _key325 = iprot.readString(); - _val326 = iprot.readString(); - struct.properties.put(_key325, _val326); + _key357 = iprot.readString(); + _val358 = iprot.readString(); + struct.properties.put(_key357, _val358); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java index 8f5b4e5bb490310a7e23e718ae8984a64f26087e..f2fffc6cc313c6e586cd3f4860824f3c266a4b90 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRe case 1: // SCHEMA_VERSIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList 
_list944 = iprot.readListBegin(); - struct.schemaVersions = new ArrayList(_list944.size); - SchemaVersionDescriptor _elem945; - for (int _i946 = 0; _i946 < _list944.size; ++_i946) + org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); + struct.schemaVersions = new ArrayList(_list976.size); + SchemaVersionDescriptor _elem977; + for (int _i978 = 0; _i978 < _list976.size; ++_i978) { - _elem945 = new SchemaVersionDescriptor(); - _elem945.read(iprot); - struct.schemaVersions.add(_elem945); + _elem977 = new SchemaVersionDescriptor(); + _elem977.read(iprot); + struct.schemaVersions.add(_elem977); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsR oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size())); - for (SchemaVersionDescriptor _iter947 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter979 : struct.schemaVersions) { - _iter947.write(oprot); + _iter979.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRe if (struct.isSetSchemaVersions()) { { oprot.writeI32(struct.schemaVersions.size()); - for (SchemaVersionDescriptor _iter948 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter980 : struct.schemaVersions) { - _iter948.write(oprot); + _iter980.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRes BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.schemaVersions = new ArrayList(_list949.size); - SchemaVersionDescriptor _elem950; - for (int _i951 = 0; _i951 < _list949.size; ++_i951) + org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.schemaVersions = new ArrayList(_list981.size); + SchemaVersionDescriptor _elem982; + for (int _i983 = 0; _i983 < _list981.size; ++_i983) { - _elem950 = new SchemaVersionDescriptor(); - _elem950.read(iprot); - struct.schemaVersions.add(_elem950); + _elem982 = new SchemaVersionDescriptor(); + _elem982.read(iprot); + struct.schemaVersions.add(_elem982); } } struct.setSchemaVersionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index dc2627a1fbdf9c8b5a03055fb071280f8aa2f5f9..8eb03ad0d73bc8692616689d35f1d95d6a9d7e66 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -794,13 +794,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list764.size); - String _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list796 
= iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list796.size); + String _elem797; + for (int _i798 = 0; _i798 < _list796.size; ++_i798) { - _elem765 = iprot.readString(); - struct.partitionVals.add(_elem765); + _elem797 = iprot.readString(); + struct.partitionVals.add(_elem797); } iprot.readListEnd(); } @@ -857,9 +857,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter767 : struct.partitionVals) + for (String _iter799 : struct.partitionVals) { - oprot.writeString(_iter767); + oprot.writeString(_iter799); } oprot.writeListEnd(); } @@ -915,9 +915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter768 : struct.partitionVals) + for (String _iter800 : struct.partitionVals) { - oprot.writeString(_iter768); + oprot.writeString(_iter800); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list769.size); - String _elem770; - for (int _i771 = 0; _i771 < _list769.size; ++_i771) + org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list801.size); + String _elem802; + for (int _i803 = 0; _i803 < _list801.size; ++_i803) { - _elem770 = iprot.readString(); - struct.partitionVals.add(_elem770); + _elem802 = iprot.readString(); + struct.partitionVals.add(_elem802); } } struct.setPartitionValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java index 8fae31cba03951f423c4a5a1d59cb2ae2f0a3b2c..0f4f299146e5e2a9b6055f789383391381f0e13f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysResponse case 1: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list336 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list336.size); - SQLForeignKey _elem337; - for (int _i338 = 0; _i338 < _list336.size; ++_i338) + org.apache.thrift.protocol.TList _list368 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list368.size); + SQLForeignKey _elem369; + for (int _i370 = 0; _i370 < _list368.size; ++_i370) { - _elem337 = new SQLForeignKey(); - _elem337.read(iprot); - struct.foreignKeys.add(_elem337); + _elem369 = new SQLForeignKey(); + _elem369.read(iprot); + struct.foreignKeys.add(_elem369); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysRespons 
oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter339 : struct.foreignKeys) + for (SQLForeignKey _iter371 : struct.foreignKeys) { - _iter339.write(oprot); + _iter371.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter340 : struct.foreignKeys) + for (SQLForeignKey _iter372 : struct.foreignKeys) { - _iter340.write(oprot); + _iter372.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list341.size); - SQLForeignKey _elem342; - for (int _i343 = 0; _i343 < _list341.size; ++_i343) + org.apache.thrift.protocol.TList _list373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list373.size); + SQLForeignKey _elem374; + for (int _i375 = 0; _i375 < _list373.size; ++_i375) { - _elem342 = new SQLForeignKey(); - _elem342.read(iprot); - struct.foreignKeys.add(_elem342); + _elem374 = new SQLForeignKey(); + _elem374.read(iprot); + struct.foreignKeys.add(_elem374); } } struct.setForeignKeysIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index ce0feb929d705bef7bcc50f02d7e9910df9708a5..04695658ec03dac95ee086974dbc431f80228959 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -1079,14 +1079,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th case 8: // RESOURCE_URIS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list554 = iprot.readListBegin(); - struct.resourceUris = new ArrayList(_list554.size); - ResourceUri _elem555; - for (int _i556 = 0; _i556 < _list554.size; ++_i556) + org.apache.thrift.protocol.TList _list586 = iprot.readListBegin(); + struct.resourceUris = new ArrayList(_list586.size); + ResourceUri _elem587; + for (int _i588 = 0; _i588 < _list586.size; ++_i588) { - _elem555 = new ResourceUri(); - _elem555.read(iprot); - struct.resourceUris.add(_elem555); + _elem587 = new ResourceUri(); + _elem587.read(iprot); + struct.resourceUris.add(_elem587); } iprot.readListEnd(); } @@ -1153,9 +1153,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size())); - for (ResourceUri _iter557 : struct.resourceUris) + for (ResourceUri _iter589 : struct.resourceUris) { - 
_iter557.write(oprot); + _iter589.write(oprot); } oprot.writeListEnd(); } @@ -1238,9 +1238,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { { oprot.writeI32(struct.resourceUris.size()); - for (ResourceUri _iter558 : struct.resourceUris) + for (ResourceUri _iter590 : struct.resourceUris) { - _iter558.write(oprot); + _iter590.write(oprot); } } } @@ -1283,14 +1283,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list559 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourceUris = new ArrayList(_list559.size); - ResourceUri _elem560; - for (int _i561 = 0; _i561 < _list559.size; ++_i561) + org.apache.thrift.protocol.TList _list591 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourceUris = new ArrayList(_list591.size); + ResourceUri _elem592; + for (int _i593 = 0; _i593 < _list591.size; ++_i593) { - _elem560 = new ResourceUri(); - _elem560.read(iprot); - struct.resourceUris.add(_elem560); + _elem592 = new ResourceUri(); + _elem592.read(iprot); + struct.resourceUris.add(_elem592); } } struct.setResourceUrisIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index 13fe5fa6d5295158287dbd1c472f7b9881fd65c5..80ade2246d2c574195e2a6c06c64495a2b62023d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list840 = iprot.readListBegin(); - struct.functions = new ArrayList(_list840.size); - Function _elem841; - for (int _i842 = 0; _i842 < _list840.size; ++_i842) + org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); + struct.functions = new ArrayList(_list872.size); + Function _elem873; + for (int _i874 = 0; _i874 < _list872.size; ++_i874) { - _elem841 = new Function(); - _elem841.read(iprot); - struct.functions.add(_elem841); + _elem873 = new Function(); + _elem873.read(iprot); + struct.functions.add(_elem873); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter843 : struct.functions) + for (Function _iter875 : struct.functions) { - _iter843.write(oprot); + _iter875.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter844 : struct.functions) + for (Function _iter876 : struct.functions) { - _iter844.write(oprot); + _iter876.write(oprot); } } } @@ -428,14 +428,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list845 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list845.size); - Function _elem846; - for (int _i847 = 0; _i847 < _list845.size; ++_i847) + org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list877.size); + Function _elem878; + for (int _i879 = 0; _i879 < _list877.size; ++_i879) { - _elem846 = new Function(); - _elem846.read(iprot); - struct.functions.add(_elem846); + _elem878 = new Function(); + _elem878.read(iprot); + struct.functions.add(_elem878); } } struct.setFunctionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 976bf001a0428e5b04da9d1ea6294344a1499c69..8392709d458d80044529c0bb41c46cbe229ca33a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list790 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list790.size); - long _elem791; - for (int _i792 = 0; _i792 < _list790.size; ++_i792) + org.apache.thrift.protocol.TList _list822 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list822.size); + long _elem823; + for (int _i824 = 0; _i824 < _list822.size; ++_i824) { - _elem791 = iprot.readI64(); - struct.fileIds.add(_elem791); + _elem823 = iprot.readI64(); + struct.fileIds.add(_elem823); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter793 : struct.fileIds) + for (long _iter825 : struct.fileIds) { - oprot.writeI64(_iter793); + oprot.writeI64(_iter825); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter794 : struct.fileIds) + for (long _iter826 : struct.fileIds) { - oprot.writeI64(_iter794); + oprot.writeI64(_iter826); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list795 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list795.size); - long _elem796; - for (int 
_i797 = 0; _i797 < _list795.size; ++_i797) + org.apache.thrift.protocol.TList _list827 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list827.size); + long _elem828; + for (int _i829 = 0; _i829 < _list827.size; ++_i829) { - _elem796 = iprot.readI64(); - struct.fileIds.add(_elem796); + _elem828 = iprot.readI64(); + struct.fileIds.add(_elem828); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 16a0113ac00c5bbd908cd83379fe63a88f0e1f62..bf2c38419be00aae424bba9e52309b9f51990c74 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map780 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map780.size); - long _key781; - MetadataPpdResult _val782; - for (int _i783 = 0; _i783 < _map780.size; ++_i783) + org.apache.thrift.protocol.TMap _map812 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map812.size); + long _key813; + MetadataPpdResult _val814; + for (int _i815 = 0; _i815 < _map812.size; ++_i815) { - _key781 = iprot.readI64(); - _val782 = new MetadataPpdResult(); - _val782.read(iprot); - struct.metadata.put(_key781, _val782); + _key813 = iprot.readI64(); + _val814 = new MetadataPpdResult(); + _val814.read(iprot); + struct.metadata.put(_key813, _val814); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter784 : struct.metadata.entrySet()) + for (Map.Entry _iter816 : struct.metadata.entrySet()) { - oprot.writeI64(_iter784.getKey()); - _iter784.getValue().write(oprot); + oprot.writeI64(_iter816.getKey()); + _iter816.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter785 : struct.metadata.entrySet()) + for (Map.Entry _iter817 : struct.metadata.entrySet()) { - oprot.writeI64(_iter785.getKey()); - _iter785.getValue().write(oprot); + oprot.writeI64(_iter817.getKey()); + _iter817.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map786 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.metadata = new HashMap(2*_map786.size); - long _key787; - MetadataPpdResult _val788; - for (int _i789 = 0; _i789 < _map786.size; ++_i789) + org.apache.thrift.protocol.TMap _map818 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map818.size); + long _key819; + MetadataPpdResult _val820; + for (int _i821 = 0; _i821 < _map818.size; ++_i821) { - _key787 = iprot.readI64(); - _val788 = new MetadataPpdResult(); - _val788.read(iprot); - struct.metadata.put(_key787, _val788); + _key819 = iprot.readI64(); + _val820 = new MetadataPpdResult(); + _val820.read(iprot); + struct.metadata.put(_key819, _val820); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 9e3ed8b28220f5729735aab8a4be86851210c49e..2719d51f8c2a39e134a998ecdaa82d9e64b3a5f3 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list808 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list808.size); - long _elem809; - for (int _i810 = 0; _i810 < _list808.size; ++_i810) + org.apache.thrift.protocol.TList _list840 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list840.size); + long _elem841; + for (int _i842 = 0; _i842 < _list840.size; ++_i842) { - _elem809 = iprot.readI64(); - struct.fileIds.add(_elem809); + _elem841 = iprot.readI64(); + struct.fileIds.add(_elem841); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter811 : struct.fileIds) + for (long _iter843 : struct.fileIds) { - oprot.writeI64(_iter811); + oprot.writeI64(_iter843); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter812 : struct.fileIds) + for (long _iter844 : struct.fileIds) { - oprot.writeI64(_iter812); + oprot.writeI64(_iter844); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list813 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list813.size); - long _elem814; - for (int _i815 = 0; _i815 < _list813.size; ++_i815) + org.apache.thrift.protocol.TList _list845 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list845.size); + long _elem846; + for (int _i847 = 0; _i847 < _list845.size; ++_i847) { - _elem814 = iprot.readI64(); - struct.fileIds.add(_elem814); + _elem846 = iprot.readI64(); + struct.fileIds.add(_elem846); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index bc73f1ec272ddf3c0dd2d56a3f342b79a0c24644..798b536ab8f46241b91d85d1e83b29cf2487e6d2 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map798 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map798.size); - long _key799; - ByteBuffer _val800; - for (int _i801 = 0; _i801 < _map798.size; ++_i801) + org.apache.thrift.protocol.TMap _map830 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map830.size); + long _key831; + ByteBuffer _val832; + for (int _i833 = 0; _i833 < _map830.size; ++_i833) { - _key799 = iprot.readI64(); - _val800 = iprot.readBinary(); - struct.metadata.put(_key799, _val800); + _key831 = iprot.readI64(); + _val832 = iprot.readBinary(); + struct.metadata.put(_key831, _val832); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter802 : struct.metadata.entrySet()) + for (Map.Entry _iter834 : struct.metadata.entrySet()) { - oprot.writeI64(_iter802.getKey()); - oprot.writeBinary(_iter802.getValue()); + oprot.writeI64(_iter834.getKey()); + oprot.writeBinary(_iter834.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter803 : struct.metadata.entrySet()) + for (Map.Entry _iter835 : struct.metadata.entrySet()) { - oprot.writeI64(_iter803.getKey()); - oprot.writeBinary(_iter803.getValue()); + oprot.writeI64(_iter835.getKey()); + oprot.writeBinary(_iter835.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map804 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map804.size); - long _key805; - ByteBuffer _val806; - for (int _i807 = 0; _i807 < _map804.size; ++_i807) + 
org.apache.thrift.protocol.TMap _map836 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map836.size); + long _key837; + ByteBuffer _val838; + for (int _i839 = 0; _i839 < _map836.size; ++_i839) { - _key805 = iprot.readI64(); - _val806 = iprot.readBinary(); - struct.metadata.put(_key805, _val806); + _key837 = iprot.readI64(); + _val838 = iprot.readBinary(); + struct.metadata.put(_key837, _val838); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java index 93ca30315749eab599871b8303d6b022cdf91d98..68d97222742909c06988dbaad895eed2a0cdb7b2 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java @@ -447,14 +447,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResp case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list562 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list562.size); - TxnInfo _elem563; - for (int _i564 = 0; _i564 < _list562.size; ++_i564) + org.apache.thrift.protocol.TList _list594 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list594.size); + TxnInfo _elem595; + for (int _i596 = 0; _i596 < _list594.size; ++_i596) { - _elem563 = new TxnInfo(); - _elem563.read(iprot); - struct.open_txns.add(_elem563); + _elem595 = new TxnInfo(); + _elem595.read(iprot); + struct.open_txns.add(_elem595); } iprot.readListEnd(); } @@ -483,9 +483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoRes oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size())); - for (TxnInfo _iter565 : struct.open_txns) + for (TxnInfo _iter597 : struct.open_txns) { - _iter565.write(oprot); + _iter597.write(oprot); } oprot.writeListEnd(); } @@ -511,9 +511,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResp oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (TxnInfo _iter566 : struct.open_txns) + for (TxnInfo _iter598 : struct.open_txns) { - _iter566.write(oprot); + _iter598.write(oprot); } } } @@ -524,14 +524,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoRespo struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list567 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.open_txns = new ArrayList(_list567.size); - TxnInfo _elem568; - for (int _i569 = 0; _i569 < _list567.size; ++_i569) + org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.open_txns = new ArrayList(_list599.size); + TxnInfo _elem600; + for (int _i601 = 0; _i601 < _list599.size; ++_i601) { - _elem568 = new TxnInfo(); - _elem568.read(iprot); - 
struct.open_txns.add(_elem568); + _elem600 = new TxnInfo(); + _elem600.read(iprot); + struct.open_txns.add(_elem600); } } struct.setOpen_txnsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java index c152a0aab70ec496d3e84d7ac9d37f211f97e873..c3a106244a1f262422bab4b64ea908fc9efeba1b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java @@ -615,13 +615,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list570 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list570.size); - long _elem571; - for (int _i572 = 0; _i572 < _list570.size; ++_i572) + org.apache.thrift.protocol.TList _list602 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list602.size); + long _elem603; + for (int _i604 = 0; _i604 < _list602.size; ++_i604) { - _elem571 = iprot.readI64(); - struct.open_txns.add(_elem571); + _elem603 = iprot.readI64(); + struct.open_txns.add(_elem603); } iprot.readListEnd(); } @@ -666,9 +666,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRespons oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size())); - for (long _iter573 : struct.open_txns) + for (long _iter605 : struct.open_txns) { - oprot.writeI64(_iter573); + oprot.writeI64(_iter605); } oprot.writeListEnd(); } @@ -704,9 +704,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (long _iter574 : struct.open_txns) + for (long _iter606 : struct.open_txns) { - oprot.writeI64(_iter574); + oprot.writeI64(_iter606); } } oprot.writeBinary(struct.abortedBits); @@ -726,13 +726,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list575 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.open_txns = new ArrayList(_list575.size); - long _elem576; - for (int _i577 = 0; _i577 < _list575.size; ++_i577) + org.apache.thrift.protocol.TList _list607 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.open_txns = new ArrayList(_list607.size); + long _elem608; + for (int _i609 = 0; _i609 < _list607.size; ++_i609) { - _elem576 = iprot.readI64(); - struct.open_txns.add(_elem576); + _elem608 = iprot.readI64(); + struct.open_txns.add(_elem608); } } struct.setOpen_txnsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java new file mode 100644 index 
0000000000000000000000000000000000000000..1edc5758697ac5b204983e711cc05954e459636a --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsFilterSpec.java @@ -0,0 +1,1137 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPartitionsFilterSpec implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPartitionsFilterSpec"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField WITH_AUTH_FIELD_DESC = new org.apache.thrift.protocol.TField("withAuth", org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField GROUP_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("groupNames", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField FILTER_MODE_FIELD_DESC = new org.apache.thrift.protocol.TField("filterMode", org.apache.thrift.protocol.TType.I32, (short)6); + private static final org.apache.thrift.protocol.TField FILTERS_FIELD_DESC = new org.apache.thrift.protocol.TField("filters", org.apache.thrift.protocol.TType.LIST, (short)7); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetPartitionsFilterSpecStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetPartitionsFilterSpecTupleSchemeFactory()); + } + + private String dbName; // required + private String tblName; // required + private boolean withAuth; // optional + private String user; // optional + private List 
groupNames; // optional + private PartitionFilterMode filterMode; // optional + private List filters; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"), + WITH_AUTH((short)3, "withAuth"), + USER((short)4, "user"), + GROUP_NAMES((short)5, "groupNames"), + /** + * + * @see PartitionFilterMode + */ + FILTER_MODE((short)6, "filterMode"), + FILTERS((short)7, "filters"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // WITH_AUTH + return WITH_AUTH; + case 4: // USER + return USER; + case 5: // GROUP_NAMES + return GROUP_NAMES; + case 6: // FILTER_MODE + return FILTER_MODE; + case 7: // FILTERS + return FILTERS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WITHAUTH_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.WITH_AUTH,_Fields.USER,_Fields.GROUP_NAMES,_Fields.FILTER_MODE,_Fields.FILTERS}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WITH_AUTH, new org.apache.thrift.meta_data.FieldMetaData("withAuth", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.USER, new org.apache.thrift.meta_data.FieldMetaData("user", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.GROUP_NAMES, new 
org.apache.thrift.meta_data.FieldMetaData("groupNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.FILTER_MODE, new org.apache.thrift.meta_data.FieldMetaData("filterMode", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PartitionFilterMode.class))); + tmpMap.put(_Fields.FILTERS, new org.apache.thrift.meta_data.FieldMetaData("filters", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsFilterSpec.class, metaDataMap); + } + + public GetPartitionsFilterSpec() { + } + + public GetPartitionsFilterSpec( + String dbName, + String tblName) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + } + + /** + * Performs a deep copy on other. + */ + public GetPartitionsFilterSpec(GetPartitionsFilterSpec other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + this.withAuth = other.withAuth; + if (other.isSetUser()) { + this.user = other.user; + } + if (other.isSetGroupNames()) { + List __this__groupNames = new ArrayList(other.groupNames); + this.groupNames = __this__groupNames; + } + if (other.isSetFilterMode()) { + this.filterMode = other.filterMode; + } + if (other.isSetFilters()) { + List __this__filters = new ArrayList(other.filters); + this.filters = __this__filters; + } + } + + public GetPartitionsFilterSpec deepCopy() { + return new GetPartitionsFilterSpec(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + setWithAuthIsSet(false); + this.withAuth = false; + this.user = null; + this.groupNames = null; + this.filterMode = null; + this.filters = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public boolean isWithAuth() { + return this.withAuth; + } + + public void setWithAuth(boolean withAuth) { + this.withAuth = withAuth; + setWithAuthIsSet(true); + } + + public void unsetWithAuth() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WITHAUTH_ISSET_ID); + } + + /** Returns true if field withAuth is set (has been assigned a value) and false otherwise */ 
+ public boolean isSetWithAuth() { + return EncodingUtils.testBit(__isset_bitfield, __WITHAUTH_ISSET_ID); + } + + public void setWithAuthIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WITHAUTH_ISSET_ID, value); + } + + public String getUser() { + return this.user; + } + + public void setUser(String user) { + this.user = user; + } + + public void unsetUser() { + this.user = null; + } + + /** Returns true if field user is set (has been assigned a value) and false otherwise */ + public boolean isSetUser() { + return this.user != null; + } + + public void setUserIsSet(boolean value) { + if (!value) { + this.user = null; + } + } + + public int getGroupNamesSize() { + return (this.groupNames == null) ? 0 : this.groupNames.size(); + } + + public java.util.Iterator getGroupNamesIterator() { + return (this.groupNames == null) ? null : this.groupNames.iterator(); + } + + public void addToGroupNames(String elem) { + if (this.groupNames == null) { + this.groupNames = new ArrayList(); + } + this.groupNames.add(elem); + } + + public List getGroupNames() { + return this.groupNames; + } + + public void setGroupNames(List groupNames) { + this.groupNames = groupNames; + } + + public void unsetGroupNames() { + this.groupNames = null; + } + + /** Returns true if field groupNames is set (has been assigned a value) and false otherwise */ + public boolean isSetGroupNames() { + return this.groupNames != null; + } + + public void setGroupNamesIsSet(boolean value) { + if (!value) { + this.groupNames = null; + } + } + + /** + * + * @see PartitionFilterMode + */ + public PartitionFilterMode getFilterMode() { + return this.filterMode; + } + + /** + * + * @see PartitionFilterMode + */ + public void setFilterMode(PartitionFilterMode filterMode) { + this.filterMode = filterMode; + } + + public void unsetFilterMode() { + this.filterMode = null; + } + + /** Returns true if field filterMode is set (has been assigned a value) and false otherwise */ + public boolean isSetFilterMode() { + return this.filterMode != null; + } + + public void setFilterModeIsSet(boolean value) { + if (!value) { + this.filterMode = null; + } + } + + public int getFiltersSize() { + return (this.filters == null) ? 0 : this.filters.size(); + } + + public java.util.Iterator getFiltersIterator() { + return (this.filters == null) ? 
null : this.filters.iterator(); + } + + public void addToFilters(String elem) { + if (this.filters == null) { + this.filters = new ArrayList(); + } + this.filters.add(elem); + } + + public List getFilters() { + return this.filters; + } + + public void setFilters(List filters) { + this.filters = filters; + } + + public void unsetFilters() { + this.filters = null; + } + + /** Returns true if field filters is set (has been assigned a value) and false otherwise */ + public boolean isSetFilters() { + return this.filters != null; + } + + public void setFiltersIsSet(boolean value) { + if (!value) { + this.filters = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + case WITH_AUTH: + if (value == null) { + unsetWithAuth(); + } else { + setWithAuth((Boolean)value); + } + break; + + case USER: + if (value == null) { + unsetUser(); + } else { + setUser((String)value); + } + break; + + case GROUP_NAMES: + if (value == null) { + unsetGroupNames(); + } else { + setGroupNames((List)value); + } + break; + + case FILTER_MODE: + if (value == null) { + unsetFilterMode(); + } else { + setFilterMode((PartitionFilterMode)value); + } + break; + + case FILTERS: + if (value == null) { + unsetFilters(); + } else { + setFilters((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + case WITH_AUTH: + return isWithAuth(); + + case USER: + return getUser(); + + case GROUP_NAMES: + return getGroupNames(); + + case FILTER_MODE: + return getFilterMode(); + + case FILTERS: + return getFilters(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + case WITH_AUTH: + return isSetWithAuth(); + case USER: + return isSetUser(); + case GROUP_NAMES: + return isSetGroupNames(); + case FILTER_MODE: + return isSetFilterMode(); + case FILTERS: + return isSetFilters(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetPartitionsFilterSpec) + return this.equals((GetPartitionsFilterSpec)that); + return false; + } + + public boolean equals(GetPartitionsFilterSpec that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + boolean this_present_withAuth = true && this.isSetWithAuth(); + boolean that_present_withAuth = true && 
that.isSetWithAuth(); + if (this_present_withAuth || that_present_withAuth) { + if (!(this_present_withAuth && that_present_withAuth)) + return false; + if (this.withAuth != that.withAuth) + return false; + } + + boolean this_present_user = true && this.isSetUser(); + boolean that_present_user = true && that.isSetUser(); + if (this_present_user || that_present_user) { + if (!(this_present_user && that_present_user)) + return false; + if (!this.user.equals(that.user)) + return false; + } + + boolean this_present_groupNames = true && this.isSetGroupNames(); + boolean that_present_groupNames = true && that.isSetGroupNames(); + if (this_present_groupNames || that_present_groupNames) { + if (!(this_present_groupNames && that_present_groupNames)) + return false; + if (!this.groupNames.equals(that.groupNames)) + return false; + } + + boolean this_present_filterMode = true && this.isSetFilterMode(); + boolean that_present_filterMode = true && that.isSetFilterMode(); + if (this_present_filterMode || that_present_filterMode) { + if (!(this_present_filterMode && that_present_filterMode)) + return false; + if (!this.filterMode.equals(that.filterMode)) + return false; + } + + boolean this_present_filters = true && this.isSetFilters(); + boolean that_present_filters = true && that.isSetFilters(); + if (this_present_filters || that_present_filters) { + if (!(this_present_filters && that_present_filters)) + return false; + if (!this.filters.equals(that.filters)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + boolean present_withAuth = true && (isSetWithAuth()); + list.add(present_withAuth); + if (present_withAuth) + list.add(withAuth); + + boolean present_user = true && (isSetUser()); + list.add(present_user); + if (present_user) + list.add(user); + + boolean present_groupNames = true && (isSetGroupNames()); + list.add(present_groupNames); + if (present_groupNames) + list.add(groupNames); + + boolean present_filterMode = true && (isSetFilterMode()); + list.add(present_filterMode); + if (present_filterMode) + list.add(filterMode.getValue()); + + boolean present_filters = true && (isSetFilters()); + list.add(present_filters); + if (present_filters) + list.add(filters); + + return list.hashCode(); + } + + @Override + public int compareTo(GetPartitionsFilterSpec other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWithAuth()).compareTo(other.isSetWithAuth()); + if (lastComparison != 0) { + return lastComparison; + } + if 
(isSetWithAuth()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.withAuth, other.withAuth); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUser()).compareTo(other.isSetUser()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUser()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.user, other.user); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetGroupNames()).compareTo(other.isSetGroupNames()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetGroupNames()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.groupNames, other.groupNames); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFilterMode()).compareTo(other.isSetFilterMode()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFilterMode()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filterMode, other.filterMode); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFilters()).compareTo(other.isSetFilters()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFilters()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filters, other.filters); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetPartitionsFilterSpec("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + if (isSetWithAuth()) { + if (!first) sb.append(", "); + sb.append("withAuth:"); + sb.append(this.withAuth); + first = false; + } + if (isSetUser()) { + if (!first) sb.append(", "); + sb.append("user:"); + if (this.user == null) { + sb.append("null"); + } else { + sb.append(this.user); + } + first = false; + } + if (isSetGroupNames()) { + if (!first) sb.append(", "); + sb.append("groupNames:"); + if (this.groupNames == null) { + sb.append("null"); + } else { + sb.append(this.groupNames); + } + first = false; + } + if (isSetFilterMode()) { + if (!first) sb.append(", "); + sb.append("filterMode:"); + if (this.filterMode == null) { + sb.append("null"); + } else { + sb.append(this.filterMode); + } + first = false; + } + if (isSetFilters()) { + if (!first) sb.append(", "); + sb.append("filters:"); + if (this.filters == null) { + sb.append("null"); + } else { + sb.append(this.filters); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetPartitionsFilterSpecStandardSchemeFactory implements SchemeFactory { + public GetPartitionsFilterSpecStandardScheme getScheme() { + return new GetPartitionsFilterSpecStandardScheme(); + } + } + + private static class GetPartitionsFilterSpecStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsFilterSpec struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // WITH_AUTH + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.withAuth = iprot.readBool(); + struct.setWithAuthIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // USER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.user = iprot.readString(); + struct.setUserIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // GROUP_NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list250 = iprot.readListBegin(); + struct.groupNames = new ArrayList(_list250.size); + String _elem251; + for (int _i252 = 0; _i252 < _list250.size; ++_i252) + { + _elem251 = iprot.readString(); + struct.groupNames.add(_elem251); + } + iprot.readListEnd(); + } + struct.setGroupNamesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // FILTER_MODE + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.filterMode = org.apache.hadoop.hive.metastore.api.PartitionFilterMode.findByValue(iprot.readI32()); + struct.setFilterModeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // FILTERS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list253 = iprot.readListBegin(); + struct.filters = new ArrayList(_list253.size); + String _elem254; + for (int _i255 = 0; _i255 < _list253.size; ++_i255) + { + _elem254 = 
iprot.readString(); + struct.filters.add(_elem254); + } + iprot.readListEnd(); + } + struct.setFiltersIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsFilterSpec struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + if (struct.isSetWithAuth()) { + oprot.writeFieldBegin(WITH_AUTH_FIELD_DESC); + oprot.writeBool(struct.withAuth); + oprot.writeFieldEnd(); + } + if (struct.user != null) { + if (struct.isSetUser()) { + oprot.writeFieldBegin(USER_FIELD_DESC); + oprot.writeString(struct.user); + oprot.writeFieldEnd(); + } + } + if (struct.groupNames != null) { + if (struct.isSetGroupNames()) { + oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groupNames.size())); + for (String _iter256 : struct.groupNames) + { + oprot.writeString(_iter256); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + if (struct.filterMode != null) { + if (struct.isSetFilterMode()) { + oprot.writeFieldBegin(FILTER_MODE_FIELD_DESC); + oprot.writeI32(struct.filterMode.getValue()); + oprot.writeFieldEnd(); + } + } + if (struct.filters != null) { + if (struct.isSetFilters()) { + oprot.writeFieldBegin(FILTERS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filters.size())); + for (String _iter257 : struct.filters) + { + oprot.writeString(_iter257); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetPartitionsFilterSpecTupleSchemeFactory implements SchemeFactory { + public GetPartitionsFilterSpecTupleScheme getScheme() { + return new GetPartitionsFilterSpecTupleScheme(); + } + } + + private static class GetPartitionsFilterSpecTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilterSpec struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDbName()) { + optionals.set(0); + } + if (struct.isSetTblName()) { + optionals.set(1); + } + if (struct.isSetWithAuth()) { + optionals.set(2); + } + if (struct.isSetUser()) { + optionals.set(3); + } + if (struct.isSetGroupNames()) { + optionals.set(4); + } + if (struct.isSetFilterMode()) { + optionals.set(5); + } + if (struct.isSetFilters()) { + optionals.set(6); + } + oprot.writeBitSet(optionals, 7); + if (struct.isSetDbName()) { + oprot.writeString(struct.dbName); + } + if (struct.isSetTblName()) { + oprot.writeString(struct.tblName); + } + if (struct.isSetWithAuth()) { + oprot.writeBool(struct.withAuth); + } + if (struct.isSetUser()) { + oprot.writeString(struct.user); + } + if (struct.isSetGroupNames()) { + { + oprot.writeI32(struct.groupNames.size()); + for (String _iter258 : 
struct.groupNames) + { + oprot.writeString(_iter258); + } + } + } + if (struct.isSetFilterMode()) { + oprot.writeI32(struct.filterMode.getValue()); + } + if (struct.isSetFilters()) { + { + oprot.writeI32(struct.filters.size()); + for (String _iter259 : struct.filters) + { + oprot.writeString(_iter259); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsFilterSpec struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(7); + if (incoming.get(0)) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } + if (incoming.get(1)) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } + if (incoming.get(2)) { + struct.withAuth = iprot.readBool(); + struct.setWithAuthIsSet(true); + } + if (incoming.get(3)) { + struct.user = iprot.readString(); + struct.setUserIsSet(true); + } + if (incoming.get(4)) { + { + org.apache.thrift.protocol.TList _list260 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.groupNames = new ArrayList(_list260.size); + String _elem261; + for (int _i262 = 0; _i262 < _list260.size; ++_i262) + { + _elem261 = iprot.readString(); + struct.groupNames.add(_elem261); + } + } + struct.setGroupNamesIsSet(true); + } + if (incoming.get(5)) { + struct.filterMode = org.apache.hadoop.hive.metastore.api.PartitionFilterMode.findByValue(iprot.readI32()); + struct.setFilterModeIsSet(true); + } + if (incoming.get(6)) { + { + org.apache.thrift.protocol.TList _list263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filters = new ArrayList(_list263.size); + String _elem264; + for (int _i265 = 0; _i265 < _list263.size; ++_i265) + { + _elem264 = iprot.readString(); + struct.filters.add(_elem264); + } + } + struct.setFiltersIsSet(true); + } + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectSpec.java new file mode 100644 index 0000000000000000000000000000000000000000..37300f0bf74c5428aa1c095173b06e1d2a12bbbd --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsProjectSpec.java @@ -0,0 +1,650 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import 
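
The generated GetPartitionsFilterSpec above carries the target database and table plus optional authorization context (withAuth, user, groupNames), a PartitionFilterMode, and a list of filter strings. A minimal sketch of how a caller might populate it follows; the database, table, group, mode constant, and filter expression are illustrative assumptions, not values taken from this patch.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
import org.apache.hadoop.hive.metastore.api.PartitionFilterMode;

public class FilterSpecSketch {
  public static GetPartitionsFilterSpec buildFilterSpec() {
    // The two-argument constructor covers dbName and tblName; everything else is optional.
    GetPartitionsFilterSpec filterSpec = new GetPartitionsFilterSpec("default", "web_logs");
    filterSpec.setUser("hive");                            // optional auth context (assumed values)
    filterSpec.setGroupNames(Arrays.asList("hadoop"));
    filterSpec.setFilterMode(PartitionFilterMode.BY_EXPR); // assumed enum constant
    filterSpec.addToFilters("ds > '2018-08-01'");          // assumed filter expression syntax
    return filterSpec;
  }
}

Because all fields beyond dbName and tblName are declared optional in the struct, a receiver can tell which ones were actually supplied through the generated isSet* accessors.
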
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPartitionsProjectSpec implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPartitionsProjectSpec"); + + private static final org.apache.thrift.protocol.TField FIELD_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("fieldList", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField PARAM_KEY_PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("paramKeyPattern", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField EXCLUDE_PARAM_KEY_PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("excludeParamKeyPattern", org.apache.thrift.protocol.TType.BOOL, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetPartitionsProjectSpecStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetPartitionsProjectSpecTupleSchemeFactory()); + } + + private List fieldList; // required + private String paramKeyPattern; // required + private boolean excludeParamKeyPattern; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FIELD_LIST((short)1, "fieldList"), + PARAM_KEY_PATTERN((short)2, "paramKeyPattern"), + EXCLUDE_PARAM_KEY_PATTERN((short)3, "excludeParamKeyPattern"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FIELD_LIST + return FIELD_LIST; + case 2: // PARAM_KEY_PATTERN + return PARAM_KEY_PATTERN; + case 3: // EXCLUDE_PARAM_KEY_PATTERN + return EXCLUDE_PARAM_KEY_PATTERN; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __EXCLUDEPARAMKEYPATTERN_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FIELD_LIST, new org.apache.thrift.meta_data.FieldMetaData("fieldList", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.PARAM_KEY_PATTERN, new org.apache.thrift.meta_data.FieldMetaData("paramKeyPattern", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.EXCLUDE_PARAM_KEY_PATTERN, new org.apache.thrift.meta_data.FieldMetaData("excludeParamKeyPattern", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsProjectSpec.class, metaDataMap); + } + + public GetPartitionsProjectSpec() { + } + + public GetPartitionsProjectSpec( + List fieldList, + String paramKeyPattern, + boolean excludeParamKeyPattern) + { + this(); + this.fieldList = fieldList; + this.paramKeyPattern = paramKeyPattern; + this.excludeParamKeyPattern = excludeParamKeyPattern; + setExcludeParamKeyPatternIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public GetPartitionsProjectSpec(GetPartitionsProjectSpec other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetFieldList()) { + List __this__fieldList = new ArrayList(other.fieldList); + this.fieldList = __this__fieldList; + } + if (other.isSetParamKeyPattern()) { + this.paramKeyPattern = other.paramKeyPattern; + } + this.excludeParamKeyPattern = other.excludeParamKeyPattern; + } + + public GetPartitionsProjectSpec deepCopy() { + return new GetPartitionsProjectSpec(this); + } + + @Override + public void clear() { + this.fieldList = null; + this.paramKeyPattern = null; + setExcludeParamKeyPatternIsSet(false); + this.excludeParamKeyPattern = false; + } + + public int getFieldListSize() { + return (this.fieldList == null) ? 0 : this.fieldList.size(); + } + + public java.util.Iterator getFieldListIterator() { + return (this.fieldList == null) ? 
null : this.fieldList.iterator(); + } + + public void addToFieldList(String elem) { + if (this.fieldList == null) { + this.fieldList = new ArrayList(); + } + this.fieldList.add(elem); + } + + public List getFieldList() { + return this.fieldList; + } + + public void setFieldList(List fieldList) { + this.fieldList = fieldList; + } + + public void unsetFieldList() { + this.fieldList = null; + } + + /** Returns true if field fieldList is set (has been assigned a value) and false otherwise */ + public boolean isSetFieldList() { + return this.fieldList != null; + } + + public void setFieldListIsSet(boolean value) { + if (!value) { + this.fieldList = null; + } + } + + public String getParamKeyPattern() { + return this.paramKeyPattern; + } + + public void setParamKeyPattern(String paramKeyPattern) { + this.paramKeyPattern = paramKeyPattern; + } + + public void unsetParamKeyPattern() { + this.paramKeyPattern = null; + } + + /** Returns true if field paramKeyPattern is set (has been assigned a value) and false otherwise */ + public boolean isSetParamKeyPattern() { + return this.paramKeyPattern != null; + } + + public void setParamKeyPatternIsSet(boolean value) { + if (!value) { + this.paramKeyPattern = null; + } + } + + public boolean isExcludeParamKeyPattern() { + return this.excludeParamKeyPattern; + } + + public void setExcludeParamKeyPattern(boolean excludeParamKeyPattern) { + this.excludeParamKeyPattern = excludeParamKeyPattern; + setExcludeParamKeyPatternIsSet(true); + } + + public void unsetExcludeParamKeyPattern() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EXCLUDEPARAMKEYPATTERN_ISSET_ID); + } + + /** Returns true if field excludeParamKeyPattern is set (has been assigned a value) and false otherwise */ + public boolean isSetExcludeParamKeyPattern() { + return EncodingUtils.testBit(__isset_bitfield, __EXCLUDEPARAMKEYPATTERN_ISSET_ID); + } + + public void setExcludeParamKeyPatternIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EXCLUDEPARAMKEYPATTERN_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FIELD_LIST: + if (value == null) { + unsetFieldList(); + } else { + setFieldList((List)value); + } + break; + + case PARAM_KEY_PATTERN: + if (value == null) { + unsetParamKeyPattern(); + } else { + setParamKeyPattern((String)value); + } + break; + + case EXCLUDE_PARAM_KEY_PATTERN: + if (value == null) { + unsetExcludeParamKeyPattern(); + } else { + setExcludeParamKeyPattern((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FIELD_LIST: + return getFieldList(); + + case PARAM_KEY_PATTERN: + return getParamKeyPattern(); + + case EXCLUDE_PARAM_KEY_PATTERN: + return isExcludeParamKeyPattern(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FIELD_LIST: + return isSetFieldList(); + case PARAM_KEY_PATTERN: + return isSetParamKeyPattern(); + case EXCLUDE_PARAM_KEY_PATTERN: + return isSetExcludeParamKeyPattern(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetPartitionsProjectSpec) + return this.equals((GetPartitionsProjectSpec)that); + return false; + } + + public boolean 
equals(GetPartitionsProjectSpec that) { + if (that == null) + return false; + + boolean this_present_fieldList = true && this.isSetFieldList(); + boolean that_present_fieldList = true && that.isSetFieldList(); + if (this_present_fieldList || that_present_fieldList) { + if (!(this_present_fieldList && that_present_fieldList)) + return false; + if (!this.fieldList.equals(that.fieldList)) + return false; + } + + boolean this_present_paramKeyPattern = true && this.isSetParamKeyPattern(); + boolean that_present_paramKeyPattern = true && that.isSetParamKeyPattern(); + if (this_present_paramKeyPattern || that_present_paramKeyPattern) { + if (!(this_present_paramKeyPattern && that_present_paramKeyPattern)) + return false; + if (!this.paramKeyPattern.equals(that.paramKeyPattern)) + return false; + } + + boolean this_present_excludeParamKeyPattern = true; + boolean that_present_excludeParamKeyPattern = true; + if (this_present_excludeParamKeyPattern || that_present_excludeParamKeyPattern) { + if (!(this_present_excludeParamKeyPattern && that_present_excludeParamKeyPattern)) + return false; + if (this.excludeParamKeyPattern != that.excludeParamKeyPattern) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_fieldList = true && (isSetFieldList()); + list.add(present_fieldList); + if (present_fieldList) + list.add(fieldList); + + boolean present_paramKeyPattern = true && (isSetParamKeyPattern()); + list.add(present_paramKeyPattern); + if (present_paramKeyPattern) + list.add(paramKeyPattern); + + boolean present_excludeParamKeyPattern = true; + list.add(present_excludeParamKeyPattern); + if (present_excludeParamKeyPattern) + list.add(excludeParamKeyPattern); + + return list.hashCode(); + } + + @Override + public int compareTo(GetPartitionsProjectSpec other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetFieldList()).compareTo(other.isSetFieldList()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFieldList()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fieldList, other.fieldList); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParamKeyPattern()).compareTo(other.isSetParamKeyPattern()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParamKeyPattern()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.paramKeyPattern, other.paramKeyPattern); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetExcludeParamKeyPattern()).compareTo(other.isSetExcludeParamKeyPattern()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetExcludeParamKeyPattern()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.excludeParamKeyPattern, other.excludeParamKeyPattern); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public 
String toString() { + StringBuilder sb = new StringBuilder("GetPartitionsProjectSpec("); + boolean first = true; + + sb.append("fieldList:"); + if (this.fieldList == null) { + sb.append("null"); + } else { + sb.append(this.fieldList); + } + first = false; + if (!first) sb.append(", "); + sb.append("paramKeyPattern:"); + if (this.paramKeyPattern == null) { + sb.append("null"); + } else { + sb.append(this.paramKeyPattern); + } + first = false; + if (!first) sb.append(", "); + sb.append("excludeParamKeyPattern:"); + sb.append(this.excludeParamKeyPattern); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetPartitionsProjectSpecStandardSchemeFactory implements SchemeFactory { + public GetPartitionsProjectSpecStandardScheme getScheme() { + return new GetPartitionsProjectSpecStandardScheme(); + } + } + + private static class GetPartitionsProjectSpecStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsProjectSpec struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FIELD_LIST + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list242 = iprot.readListBegin(); + struct.fieldList = new ArrayList(_list242.size); + String _elem243; + for (int _i244 = 0; _i244 < _list242.size; ++_i244) + { + _elem243 = iprot.readString(); + struct.fieldList.add(_elem243); + } + iprot.readListEnd(); + } + struct.setFieldListIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // PARAM_KEY_PATTERN + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.paramKeyPattern = iprot.readString(); + struct.setParamKeyPatternIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // EXCLUDE_PARAM_KEY_PATTERN + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.excludeParamKeyPattern = iprot.readBool(); + struct.setExcludeParamKeyPatternIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsProjectSpec struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.fieldList != null) { + oprot.writeFieldBegin(FIELD_LIST_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fieldList.size())); + for (String _iter245 : struct.fieldList) + { + oprot.writeString(_iter245); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.paramKeyPattern != null) { + oprot.writeFieldBegin(PARAM_KEY_PATTERN_FIELD_DESC); + oprot.writeString(struct.paramKeyPattern); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(EXCLUDE_PARAM_KEY_PATTERN_FIELD_DESC); + oprot.writeBool(struct.excludeParamKeyPattern); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetPartitionsProjectSpecTupleSchemeFactory implements SchemeFactory { + public GetPartitionsProjectSpecTupleScheme getScheme() { + return new GetPartitionsProjectSpecTupleScheme(); + } + } + + private static class GetPartitionsProjectSpecTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProjectSpec struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetFieldList()) { + optionals.set(0); + } + if (struct.isSetParamKeyPattern()) { + optionals.set(1); + } + if (struct.isSetExcludeParamKeyPattern()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetFieldList()) { + { + oprot.writeI32(struct.fieldList.size()); + for (String _iter246 : struct.fieldList) + { + oprot.writeString(_iter246); + } + } + } + if (struct.isSetParamKeyPattern()) { + oprot.writeString(struct.paramKeyPattern); + } + if (struct.isSetExcludeParamKeyPattern()) { + oprot.writeBool(struct.excludeParamKeyPattern); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsProjectSpec struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fieldList = new ArrayList(_list247.size); + String _elem248; + for (int _i249 = 0; _i249 < _list247.size; ++_i249) + { + _elem248 = iprot.readString(); + struct.fieldList.add(_elem248); + } + } + struct.setFieldListIsSet(true); + } + if (incoming.get(1)) { + struct.paramKeyPattern = iprot.readString(); + struct.setParamKeyPatternIsSet(true); + } + if (incoming.get(2)) { + struct.excludeParamKeyPattern = iprot.readBool(); + struct.setExcludeParamKeyPatternIsSet(true); + } + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java new file mode 100644 index 0000000000000000000000000000000000000000..37058d878839b9b9aeda1775298bd51948ee9026 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsRequest.java @@ -0,0 +1,509 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU 
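
GetPartitionsProjectSpec, generated above, describes the projection itself: a fieldList of partition field names to return, an optional paramKeyPattern for selecting partition parameters, and an excludeParamKeyPattern flag (which, by its name, presumably excludes parameters matching the pattern). The sketch below builds one and pairs it with a filter spec inside the GetPartitionsRequest defined in the next file of this diff; the dotted field path and the pattern string are assumptions about how the server interprets the spec, not values taken from this patch.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectSpec;
import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;

public class ProjectionRequestSketch {
  public static GetPartitionsRequest buildRequest(GetPartitionsFilterSpec filterSpec) {
    GetPartitionsProjectSpec projection = new GetPartitionsProjectSpec(
        Arrays.asList("values", "sd.location"), // fieldList: assumed nested Partition field paths
        "transient%",                           // paramKeyPattern: assumed LIKE-style pattern
        false);                                 // excludeParamKeyPattern: keep matching params
    // GetPartitionsRequest pairs the projection with the filter spec; those are its only two fields.
    return new GetPartitionsRequest(projection, filterSpec);
  }
}
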
ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPartitionsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPartitionsRequest"); + + private static final org.apache.thrift.protocol.TField PROJECTION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("projectionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField FILTER_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("filterSpec", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetPartitionsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetPartitionsRequestTupleSchemeFactory()); + } + + private GetPartitionsProjectSpec projectionSpec; // required + private GetPartitionsFilterSpec filterSpec; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PROJECTION_SPEC((short)1, "projectionSpec"), + FILTER_SPEC((short)2, "filterSpec"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PROJECTION_SPEC + return PROJECTION_SPEC; + case 2: // FILTER_SPEC + return FILTER_SPEC; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PROJECTION_SPEC, new org.apache.thrift.meta_data.FieldMetaData("projectionSpec", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetPartitionsProjectSpec.class))); + tmpMap.put(_Fields.FILTER_SPEC, new org.apache.thrift.meta_data.FieldMetaData("filterSpec", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetPartitionsFilterSpec.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsRequest.class, metaDataMap); + } + + public GetPartitionsRequest() { + } + + public GetPartitionsRequest( + GetPartitionsProjectSpec projectionSpec, + GetPartitionsFilterSpec filterSpec) + { + this(); + this.projectionSpec = projectionSpec; + this.filterSpec = filterSpec; + } + + /** + * Performs a deep copy on other. + */ + public GetPartitionsRequest(GetPartitionsRequest other) { + if (other.isSetProjectionSpec()) { + this.projectionSpec = new GetPartitionsProjectSpec(other.projectionSpec); + } + if (other.isSetFilterSpec()) { + this.filterSpec = new GetPartitionsFilterSpec(other.filterSpec); + } + } + + public GetPartitionsRequest deepCopy() { + return new GetPartitionsRequest(this); + } + + @Override + public void clear() { + this.projectionSpec = null; + this.filterSpec = null; + } + + public GetPartitionsProjectSpec getProjectionSpec() { + return this.projectionSpec; + } + + public void setProjectionSpec(GetPartitionsProjectSpec projectionSpec) { + this.projectionSpec = projectionSpec; + } + + public void unsetProjectionSpec() { + this.projectionSpec = null; + } + + /** Returns true if field projectionSpec is set (has been assigned a value) and false otherwise */ + public boolean isSetProjectionSpec() { + return this.projectionSpec != null; + } + + public void setProjectionSpecIsSet(boolean value) { + if (!value) { + this.projectionSpec = null; + } + } + + public GetPartitionsFilterSpec getFilterSpec() { + return this.filterSpec; + } + + public void setFilterSpec(GetPartitionsFilterSpec filterSpec) { + this.filterSpec = filterSpec; + } + + public void unsetFilterSpec() { + this.filterSpec = null; + } + + /** Returns true if field filterSpec is set (has been assigned a value) and false otherwise */ + public boolean isSetFilterSpec() { + return this.filterSpec != null; + } + + public void setFilterSpecIsSet(boolean value) { + if (!value) { + this.filterSpec = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PROJECTION_SPEC: + if (value == null) { + unsetProjectionSpec(); + } else { + setProjectionSpec((GetPartitionsProjectSpec)value); + } + break; + + case FILTER_SPEC: + if (value == null) { + unsetFilterSpec(); + } else { + 
setFilterSpec((GetPartitionsFilterSpec)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PROJECTION_SPEC: + return getProjectionSpec(); + + case FILTER_SPEC: + return getFilterSpec(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PROJECTION_SPEC: + return isSetProjectionSpec(); + case FILTER_SPEC: + return isSetFilterSpec(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetPartitionsRequest) + return this.equals((GetPartitionsRequest)that); + return false; + } + + public boolean equals(GetPartitionsRequest that) { + if (that == null) + return false; + + boolean this_present_projectionSpec = true && this.isSetProjectionSpec(); + boolean that_present_projectionSpec = true && that.isSetProjectionSpec(); + if (this_present_projectionSpec || that_present_projectionSpec) { + if (!(this_present_projectionSpec && that_present_projectionSpec)) + return false; + if (!this.projectionSpec.equals(that.projectionSpec)) + return false; + } + + boolean this_present_filterSpec = true && this.isSetFilterSpec(); + boolean that_present_filterSpec = true && that.isSetFilterSpec(); + if (this_present_filterSpec || that_present_filterSpec) { + if (!(this_present_filterSpec && that_present_filterSpec)) + return false; + if (!this.filterSpec.equals(that.filterSpec)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_projectionSpec = true && (isSetProjectionSpec()); + list.add(present_projectionSpec); + if (present_projectionSpec) + list.add(projectionSpec); + + boolean present_filterSpec = true && (isSetFilterSpec()); + list.add(present_filterSpec); + if (present_filterSpec) + list.add(filterSpec); + + return list.hashCode(); + } + + @Override + public int compareTo(GetPartitionsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetProjectionSpec()).compareTo(other.isSetProjectionSpec()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetProjectionSpec()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.projectionSpec, other.projectionSpec); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFilterSpec()).compareTo(other.isSetFilterSpec()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFilterSpec()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filterSpec, other.filterSpec); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("GetPartitionsRequest("); + boolean first = true; + + sb.append("projectionSpec:"); + if (this.projectionSpec == null) { + sb.append("null"); + } else { + sb.append(this.projectionSpec); + } + first = false; + if (!first) sb.append(", "); + sb.append("filterSpec:"); + if (this.filterSpec == null) { + sb.append("null"); + } else { + sb.append(this.filterSpec); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (projectionSpec != null) { + projectionSpec.validate(); + } + if (filterSpec != null) { + filterSpec.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetPartitionsRequestStandardSchemeFactory implements SchemeFactory { + public GetPartitionsRequestStandardScheme getScheme() { + return new GetPartitionsRequestStandardScheme(); + } + } + + private static class GetPartitionsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PROJECTION_SPEC + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.projectionSpec = new GetPartitionsProjectSpec(); + struct.projectionSpec.read(iprot); + struct.setProjectionSpecIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // FILTER_SPEC + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.filterSpec = new GetPartitionsFilterSpec(); + struct.filterSpec.read(iprot); + struct.setFilterSpecIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.projectionSpec != null) { + oprot.writeFieldBegin(PROJECTION_SPEC_FIELD_DESC); + struct.projectionSpec.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.filterSpec != null) { + oprot.writeFieldBegin(FILTER_SPEC_FIELD_DESC); + struct.filterSpec.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetPartitionsRequestTupleSchemeFactory implements SchemeFactory { + public GetPartitionsRequestTupleScheme getScheme() { + return new 
GetPartitionsRequestTupleScheme(); + } + } + + private static class GetPartitionsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetProjectionSpec()) { + optionals.set(0); + } + if (struct.isSetFilterSpec()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetProjectionSpec()) { + struct.projectionSpec.write(oprot); + } + if (struct.isSetFilterSpec()) { + struct.filterSpec.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.projectionSpec = new GetPartitionsProjectSpec(); + struct.projectionSpec.read(iprot); + struct.setProjectionSpecIsSet(true); + } + if (incoming.get(1)) { + struct.filterSpec = new GetPartitionsFilterSpec(); + struct.filterSpec.read(iprot); + struct.setFilterSpecIsSet(true); + } + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java new file mode 100644 index 0000000000000000000000000000000000000000..92eec3eb0a19de79497a80a2620e0c71d9e883f7 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsResponse.java @@ -0,0 +1,449 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPartitionsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPartitionsResponse"); + + private static final org.apache.thrift.protocol.TField PARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionSpec", org.apache.thrift.protocol.TType.LIST, 
(short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetPartitionsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetPartitionsResponseTupleSchemeFactory()); + } + + private List partitionSpec; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PARTITION_SPEC((short)1, "partitionSpec"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PARTITION_SPEC + return PARTITION_SPEC; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PARTITION_SPEC, new org.apache.thrift.meta_data.FieldMetaData("partitionSpec", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "PartitionSpec")))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsResponse.class, metaDataMap); + } + + public GetPartitionsResponse() { + } + + public GetPartitionsResponse( + List partitionSpec) + { + this(); + this.partitionSpec = partitionSpec; + } + + /** + * Performs a deep copy on other. + */ + public GetPartitionsResponse(GetPartitionsResponse other) { + if (other.isSetPartitionSpec()) { + List __this__partitionSpec = new ArrayList(other.partitionSpec.size()); + for (PartitionSpec other_element : other.partitionSpec) { + __this__partitionSpec.add(other_element); + } + this.partitionSpec = __this__partitionSpec; + } + } + + public GetPartitionsResponse deepCopy() { + return new GetPartitionsResponse(this); + } + + @Override + public void clear() { + this.partitionSpec = null; + } + + public int getPartitionSpecSize() { + return (this.partitionSpec == null) ? 0 : this.partitionSpec.size(); + } + + public java.util.Iterator getPartitionSpecIterator() { + return (this.partitionSpec == null) ? 
null : this.partitionSpec.iterator(); + } + + public void addToPartitionSpec(PartitionSpec elem) { + if (this.partitionSpec == null) { + this.partitionSpec = new ArrayList(); + } + this.partitionSpec.add(elem); + } + + public List getPartitionSpec() { + return this.partitionSpec; + } + + public void setPartitionSpec(List partitionSpec) { + this.partitionSpec = partitionSpec; + } + + public void unsetPartitionSpec() { + this.partitionSpec = null; + } + + /** Returns true if field partitionSpec is set (has been assigned a value) and false otherwise */ + public boolean isSetPartitionSpec() { + return this.partitionSpec != null; + } + + public void setPartitionSpecIsSet(boolean value) { + if (!value) { + this.partitionSpec = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PARTITION_SPEC: + if (value == null) { + unsetPartitionSpec(); + } else { + setPartitionSpec((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PARTITION_SPEC: + return getPartitionSpec(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PARTITION_SPEC: + return isSetPartitionSpec(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetPartitionsResponse) + return this.equals((GetPartitionsResponse)that); + return false; + } + + public boolean equals(GetPartitionsResponse that) { + if (that == null) + return false; + + boolean this_present_partitionSpec = true && this.isSetPartitionSpec(); + boolean that_present_partitionSpec = true && that.isSetPartitionSpec(); + if (this_present_partitionSpec || that_present_partitionSpec) { + if (!(this_present_partitionSpec && that_present_partitionSpec)) + return false; + if (!this.partitionSpec.equals(that.partitionSpec)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_partitionSpec = true && (isSetPartitionSpec()); + list.add(present_partitionSpec); + if (present_partitionSpec) + list.add(partitionSpec); + + return list.hashCode(); + } + + @Override + public int compareTo(GetPartitionsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetPartitionSpec()).compareTo(other.isSetPartitionSpec()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartitionSpec()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionSpec, other.partitionSpec); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetPartitionsResponse("); + 
boolean first = true; + + sb.append("partitionSpec:"); + if (this.partitionSpec == null) { + sb.append("null"); + } else { + sb.append(this.partitionSpec); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetPartitionsResponseStandardSchemeFactory implements SchemeFactory { + public GetPartitionsResponseStandardScheme getScheme() { + return new GetPartitionsResponseStandardScheme(); + } + } + + private static class GetPartitionsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PARTITION_SPEC + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list266 = iprot.readListBegin(); + struct.partitionSpec = new ArrayList(_list266.size); + PartitionSpec _elem267; + for (int _i268 = 0; _i268 < _list266.size; ++_i268) + { + _elem267 = new PartitionSpec(); + _elem267.read(iprot); + struct.partitionSpec.add(_elem267); + } + iprot.readListEnd(); + } + struct.setPartitionSpecIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.partitionSpec != null) { + oprot.writeFieldBegin(PARTITION_SPEC_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionSpec.size())); + for (PartitionSpec _iter269 : struct.partitionSpec) + { + _iter269.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetPartitionsResponseTupleSchemeFactory implements SchemeFactory { + public GetPartitionsResponseTupleScheme getScheme() { + return new GetPartitionsResponseTupleScheme(); + } + } + + private static class GetPartitionsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if 
(struct.isSetPartitionSpec()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetPartitionSpec()) { + { + oprot.writeI32(struct.partitionSpec.size()); + for (PartitionSpec _iter270 : struct.partitionSpec) + { + _iter270.write(oprot); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionSpec = new ArrayList(_list271.size); + PartitionSpec _elem272; + for (int _i273 = 0; _i273 < _list271.size; ++_i273) + { + _elem272 = new PartitionSpec(); + _elem272.read(iprot); + struct.partitionSpec.add(_elem272); + } + } + struct.setPartitionSpecIsSet(true); + } + } + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index f241b5aa79e827811ac5b7297d1da1bc5978ae09..ed9b709f654c0f16632b55fcb41009c6398f105a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -606,13 +606,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list856.size); - String _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list888.size); + String _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem857 = iprot.readString(); - struct.tblNames.add(_elem857); + _elem889 = iprot.readString(); + struct.tblNames.add(_elem889); } iprot.readListEnd(); } @@ -661,9 +661,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter859 : struct.tblNames) + for (String _iter891 : struct.tblNames) { - oprot.writeString(_iter859); + oprot.writeString(_iter891); } oprot.writeListEnd(); } @@ -716,9 +716,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter860 : struct.tblNames) + for (String _iter892 : struct.tblNames) { - oprot.writeString(_iter860); + oprot.writeString(_iter892); } } } @@ -738,13 +738,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list861.size); - String _elem862; - for (int _i863 = 0; _i863 < _list861.size; ++_i863) + 
org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list893.size); + String _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem862 = iprot.readString(); - struct.tblNames.add(_elem862); + _elem894 = iprot.readString(); + struct.tblNames.add(_elem894); } } struct.setTblNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index b351c40f97c452f780f045bc71823096c172421b..31584b09ca621e2ebdfbf1e8ee6509c417efd467 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); - struct.tables = new ArrayList(_list864.size); - Table _elem865; - for (int _i866 = 0; _i866 < _list864.size; ++_i866) + org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.tables = new ArrayList
(_list896.size); + Table _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem865 = new Table(); - _elem865.read(iprot); - struct.tables.add(_elem865); + _elem897 = new Table(); + _elem897.read(iprot); + struct.tables.add(_elem897); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter867 : struct.tables) + for (Table _iter899 : struct.tables) { - _iter867.write(oprot); + _iter899.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter868 : struct.tables) + for (Table _iter900 : struct.tables) { - _iter868.write(oprot); + _iter900.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
(_list869.size); - Table _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList
(_list901.size); + Table _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem870 = new Table(); - _elem870.read(iprot); - struct.tables.add(_elem870); + _elem902 = new Table(); + _elem902.read(iprot); + struct.tables.add(_elem902); } } struct.setTablesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java index a5bbb86af1f05c0181debdaec4787c192f31157f..b055939858186342201b16c898f85277f19cec67 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java @@ -436,13 +436,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsReq case 1: // FULL_TABLE_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list618 = iprot.readListBegin(); - struct.fullTableNames = new ArrayList(_list618.size); - String _elem619; - for (int _i620 = 0; _i620 < _list618.size; ++_i620) + org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); + struct.fullTableNames = new ArrayList(_list650.size); + String _elem651; + for (int _i652 = 0; _i652 < _list650.size; ++_i652) { - _elem619 = iprot.readString(); - struct.fullTableNames.add(_elem619); + _elem651 = iprot.readString(); + struct.fullTableNames.add(_elem651); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(FULL_TABLE_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fullTableNames.size())); - for (String _iter621 : struct.fullTableNames) + for (String _iter653 : struct.fullTableNames) { - oprot.writeString(_iter621); + oprot.writeString(_iter653); } oprot.writeListEnd(); } @@ -508,9 +508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fullTableNames.size()); - for (String _iter622 : struct.fullTableNames) + for (String _iter654 : struct.fullTableNames) { - oprot.writeString(_iter622); + oprot.writeString(_iter654); } } oprot.writeString(struct.validTxnList); @@ -520,13 +520,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list623 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fullTableNames = new ArrayList(_list623.size); - String _elem624; - for (int _i625 = 0; _i625 < _list623.size; ++_i625) + org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fullTableNames = new ArrayList(_list655.size); + String _elem656; + for (int _i657 = 0; _i657 < _list655.size; ++_i657) { - _elem624 = iprot.readString(); - struct.fullTableNames.add(_elem624); + _elem656 = iprot.readString(); + struct.fullTableNames.add(_elem656); } } 
struct.setFullTableNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java index 96a6a00572de5079d3d0687b00d3c8229018cedf..8996f8bf0281157805ef6ba145d0447286abbe0e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRes case 1: // TBL_VALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); - struct.tblValidWriteIds = new ArrayList(_list634.size); - TableValidWriteIds _elem635; - for (int _i636 = 0; _i636 < _list634.size; ++_i636) + org.apache.thrift.protocol.TList _list666 = iprot.readListBegin(); + struct.tblValidWriteIds = new ArrayList(_list666.size); + TableValidWriteIds _elem667; + for (int _i668 = 0; _i668 < _list666.size; ++_i668) { - _elem635 = new TableValidWriteIds(); - _elem635.read(iprot); - struct.tblValidWriteIds.add(_elem635); + _elem667 = new TableValidWriteIds(); + _elem667.read(iprot); + struct.tblValidWriteIds.add(_elem667); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size())); - for (TableValidWriteIds _iter637 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter669 : struct.tblValidWriteIds) { - _iter637.write(oprot); + _iter669.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tblValidWriteIds.size()); - for (TableValidWriteIds _iter638 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter670 : struct.tblValidWriteIds) { - _iter638.write(oprot); + _iter670.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tblValidWriteIds = new ArrayList(_list639.size); - TableValidWriteIds _elem640; - for (int _i641 = 0; _i641 < _list639.size; ++_i641) + org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tblValidWriteIds = new ArrayList(_list671.size); + TableValidWriteIds _elem672; + for (int _i673 = 0; _i673 < _list671.size; ++_i673) { - _elem640 = new TableValidWriteIds(); - _elem640.read(iprot); - struct.tblValidWriteIds.add(_elem640); + _elem672 = new TableValidWriteIds(); + _elem672.read(iprot); + struct.tblValidWriteIds.add(_elem672); } } struct.setTblValidWriteIdsIsSet(true); diff --git 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index a6535aa8fc07e188ad53ff8bf6ef8d20ff5caa21..70ec20c6382052b5b171155d4b3ca664a0ac25dc 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set682 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set682.size); - long _elem683; - for (int _i684 = 0; _i684 < _set682.size; ++_i684) + org.apache.thrift.protocol.TSet _set714 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set714.size); + long _elem715; + for (int _i716 = 0; _i716 < _set714.size; ++_i716) { - _elem683 = iprot.readI64(); - struct.aborted.add(_elem683); + _elem715 = iprot.readI64(); + struct.aborted.add(_elem715); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set685 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set685.size); - long _elem686; - for (int _i687 = 0; _i687 < _set685.size; ++_i687) + org.apache.thrift.protocol.TSet _set717 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set717.size); + long _elem718; + for (int _i719 = 0; _i719 < _set717.size; ++_i719) { - _elem686 = iprot.readI64(); - struct.nosuch.add(_elem686); + _elem718 = iprot.readI64(); + struct.nosuch.add(_elem718); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter688 : struct.aborted) + for (long _iter720 : struct.aborted) { - oprot.writeI64(_iter688); + oprot.writeI64(_iter720); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter689 : struct.nosuch) + for (long _iter721 : struct.nosuch) { - oprot.writeI64(_iter689); + oprot.writeI64(_iter721); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter690 : struct.aborted) + for (long _iter722 : struct.aborted) { - oprot.writeI64(_iter690); + oprot.writeI64(_iter722); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter691 : struct.nosuch) + for (long _iter723 : struct.nosuch) { - oprot.writeI64(_iter691); + oprot.writeI64(_iter723); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void 
read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set692 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set692.size); - long _elem693; - for (int _i694 = 0; _i694 < _set692.size; ++_i694) + org.apache.thrift.protocol.TSet _set724 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set724.size); + long _elem725; + for (int _i726 = 0; _i726 < _set724.size; ++_i726) { - _elem693 = iprot.readI64(); - struct.aborted.add(_elem693); + _elem725 = iprot.readI64(); + struct.aborted.add(_elem725); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set695 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set695.size); - long _elem696; - for (int _i697 = 0; _i697 < _set695.size; ++_i697) + org.apache.thrift.protocol.TSet _set727 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set727.size); + long _elem728; + for (int _i729 = 0; _i729 < _set727.size; ++_i729) { - _elem696 = iprot.readI64(); - struct.nosuch.add(_elem696); + _elem728 = iprot.readI64(); + struct.nosuch.add(_elem728); } } struct.setNosuchIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index 70690a4e0a711e24728fff7ffc63c6019bda6938..0279ee1e69b448adea41db3d4098557e9d5dbc1d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list740.size); - String _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list772.size); + String _elem773; + for (int _i774 = 0; _i774 < _list772.size; ++_i774) { - _elem741 = iprot.readString(); - struct.filesAdded.add(_elem741); + _elem773 = iprot.readString(); + struct.filesAdded.add(_elem773); } iprot.readListEnd(); } @@ -654,13 +654,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list743 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list743.size); - String _elem744; - for (int _i745 = 0; _i745 < _list743.size; ++_i745) + org.apache.thrift.protocol.TList _list775 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list775.size); + String _elem776; + for (int _i777 = 0; _i777 < _list775.size; ++_i777) { - _elem744 = iprot.readString(); - 
struct.filesAddedChecksum.add(_elem744); + _elem776 = iprot.readString(); + struct.filesAddedChecksum.add(_elem776); } iprot.readListEnd(); } @@ -672,13 +672,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 4: // SUB_DIRECTORY_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); - struct.subDirectoryList = new ArrayList(_list746.size); - String _elem747; - for (int _i748 = 0; _i748 < _list746.size; ++_i748) + org.apache.thrift.protocol.TList _list778 = iprot.readListBegin(); + struct.subDirectoryList = new ArrayList(_list778.size); + String _elem779; + for (int _i780 = 0; _i780 < _list778.size; ++_i780) { - _elem747 = iprot.readString(); - struct.subDirectoryList.add(_elem747); + _elem779 = iprot.readString(); + struct.subDirectoryList.add(_elem779); } iprot.readListEnd(); } @@ -709,9 +709,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter749 : struct.filesAdded) + for (String _iter781 : struct.filesAdded) { - oprot.writeString(_iter749); + oprot.writeString(_iter781); } oprot.writeListEnd(); } @@ -722,9 +722,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter750 : struct.filesAddedChecksum) + for (String _iter782 : struct.filesAddedChecksum) { - oprot.writeString(_iter750); + oprot.writeString(_iter782); } oprot.writeListEnd(); } @@ -736,9 +736,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(SUB_DIRECTORY_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.subDirectoryList.size())); - for (String _iter751 : struct.subDirectoryList) + for (String _iter783 : struct.subDirectoryList) { - oprot.writeString(_iter751); + oprot.writeString(_iter783); } oprot.writeListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter752 : struct.filesAdded) + for (String _iter784 : struct.filesAdded) { - oprot.writeString(_iter752); + oprot.writeString(_iter784); } } BitSet optionals = new BitSet(); @@ -786,18 +786,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter753 : struct.filesAddedChecksum) + for (String _iter785 : struct.filesAddedChecksum) { - oprot.writeString(_iter753); + oprot.writeString(_iter785); } } } if (struct.isSetSubDirectoryList()) { { oprot.writeI32(struct.subDirectoryList.size()); - for (String _iter754 : struct.subDirectoryList) + for (String _iter786 : struct.subDirectoryList) { - oprot.writeString(_iter754); + oprot.writeString(_iter786); } } } @@ -807,13 +807,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list755 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list755.size); - String _elem756; - for (int _i757 = 0; _i757 < _list755.size; ++_i757) + org.apache.thrift.protocol.TList _list787 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list787.size); + String _elem788; + for (int _i789 = 0; _i789 < _list787.size; ++_i789) { - _elem756 = iprot.readString(); - struct.filesAdded.add(_elem756); + _elem788 = iprot.readString(); + struct.filesAdded.add(_elem788); } } struct.setFilesAddedIsSet(true); @@ -824,26 +824,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list758 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list758.size); - String _elem759; - for (int _i760 = 0; _i760 < _list758.size; ++_i760) + org.apache.thrift.protocol.TList _list790 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list790.size); + String _elem791; + for (int _i792 = 0; _i792 < _list790.size; ++_i792) { - _elem759 = iprot.readString(); - struct.filesAddedChecksum.add(_elem759); + _elem791 = iprot.readString(); + struct.filesAddedChecksum.add(_elem791); } } struct.setFilesAddedChecksumIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.subDirectoryList = new ArrayList(_list761.size); - String _elem762; - for (int _i763 = 0; _i763 < _list761.size; ++_i763) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.subDirectoryList = new ArrayList(_list793.size); + String _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem762 = iprot.readString(); - struct.subDirectoryList.add(_elem762); + _elem794 = iprot.readString(); + struct.subDirectoryList.add(_elem794); } } struct.setSubDirectoryListIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index cabed5af7247e559a76456745da6c94d2fad67ba..f9b5e3dbc7046680e72ae8f9e712b8660f4f9dc4 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list666 = iprot.readListBegin(); - struct.component = new ArrayList(_list666.size); - LockComponent _elem667; - for (int _i668 = 0; _i668 < _list666.size; ++_i668) + org.apache.thrift.protocol.TList _list698 = iprot.readListBegin(); + struct.component = new ArrayList(_list698.size); + LockComponent 
_elem699; + for (int _i700 = 0; _i700 < _list698.size; ++_i700) { - _elem667 = new LockComponent(); - _elem667.read(iprot); - struct.component.add(_elem667); + _elem699 = new LockComponent(); + _elem699.read(iprot); + struct.component.add(_elem699); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter669 : struct.component) + for (LockComponent _iter701 : struct.component) { - _iter669.write(oprot); + _iter701.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter670 : struct.component) + for (LockComponent _iter702 : struct.component) { - _iter670.write(oprot); + _iter702.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list671.size); - LockComponent _elem672; - for (int _i673 = 0; _i673 < _list671.size; ++_i673) + org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list703.size); + LockComponent _elem704; + for (int _i705 = 0; _i705 < _list703.size; ++_i705) { - _elem672 = new LockComponent(); - _elem672.read(iprot); - struct.component.add(_elem672); + _elem704 = new LockComponent(); + _elem704.read(iprot); + struct.component.add(_elem704); } } struct.setComponentIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java index 54070adee31f76f741324317220893d05c9f3f0e..6371a528961af75bf07cc277c22c6b3c1f6284ad 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR case 1: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list352 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list352.size); - SQLNotNullConstraint _elem353; - for (int _i354 = 0; _i354 < _list352.size; ++_i354) + org.apache.thrift.protocol.TList _list384 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list384.size); + SQLNotNullConstraint _elem385; + for (int _i386 = 0; _i386 < _list384.size; ++_i386) { - _elem353 = new SQLNotNullConstraint(); - _elem353.read(iprot); - struct.notNullConstraints.add(_elem353); + _elem385 = 
new SQLNotNullConstraint(); + _elem385.read(iprot); + struct.notNullConstraints.add(_elem385); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotNullConstraints oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter355 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter387 : struct.notNullConstraints) { - _iter355.write(oprot); + _iter387.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter356 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter388 : struct.notNullConstraints) { - _iter356.write(oprot); + _iter388.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list357.size); - SQLNotNullConstraint _elem358; - for (int _i359 = 0; _i359 < _list357.size; ++_i359) + org.apache.thrift.protocol.TList _list389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list389.size); + SQLNotNullConstraint _elem390; + for (int _i391 = 0; _i391 < _list389.size; ++_i391) { - _elem358 = new SQLNotNullConstraint(); - _elem358.read(iprot); - struct.notNullConstraints.add(_elem358); + _elem390 = new SQLNotNullConstraint(); + _elem390.read(iprot); + struct.notNullConstraints.add(_elem390); } } struct.setNotNullConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index e86c9f66080c6f97b93f939192a1a3ceb4c60967..1f2776b7d16139b38811fa50e68a64ff2781a9b7 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); - struct.events = new ArrayList(_list732.size); - NotificationEvent _elem733; - for (int _i734 = 0; _i734 < _list732.size; ++_i734) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.events = new ArrayList(_list764.size); + NotificationEvent _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem733 = new NotificationEvent(); - _elem733.read(iprot); - struct.events.add(_elem733); + _elem765 = new NotificationEvent(); + _elem765.read(iprot); + 
struct.events.add(_elem765); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter735 : struct.events) + for (NotificationEvent _iter767 : struct.events) { - _iter735.write(oprot); + _iter767.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter736 : struct.events) + for (NotificationEvent _iter768 : struct.events) { - _iter736.write(oprot); + _iter768.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list737.size); - NotificationEvent _elem738; - for (int _i739 = 0; _i739 < _list737.size; ++_i739) + org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list769.size); + NotificationEvent _elem770; + for (int _i771 = 0; _i771 < _list769.size; ++_i771) { - _elem738 = new NotificationEvent(); - _elem738.read(iprot); - struct.events.add(_elem738); + _elem770 = new NotificationEvent(); + _elem770.read(iprot); + struct.events.add(_elem770); } } struct.setEventsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java index 19b2c01b3a7cd03ee336914e493987e2c376e962..b3b2de242531d02a545f54bf4d02869f76a1e594 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java @@ -808,13 +808,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnRequest stru case 6: // REPL_SRC_TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list578 = iprot.readListBegin(); - struct.replSrcTxnIds = new ArrayList(_list578.size); - long _elem579; - for (int _i580 = 0; _i580 < _list578.size; ++_i580) + org.apache.thrift.protocol.TList _list610 = iprot.readListBegin(); + struct.replSrcTxnIds = new ArrayList(_list610.size); + long _elem611; + for (int _i612 = 0; _i612 < _list610.size; ++_i612) { - _elem579 = iprot.readI64(); - struct.replSrcTxnIds.add(_elem579); + _elem611 = iprot.readI64(); + struct.replSrcTxnIds.add(_elem611); } iprot.readListEnd(); } @@ -868,9 +868,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnRequest str oprot.writeFieldBegin(REPL_SRC_TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.replSrcTxnIds.size())); - for (long 
_iter581 : struct.replSrcTxnIds) + for (long _iter613 : struct.replSrcTxnIds) { - oprot.writeI64(_iter581); + oprot.writeI64(_iter613); } oprot.writeListEnd(); } @@ -917,9 +917,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnRequest stru if (struct.isSetReplSrcTxnIds()) { { oprot.writeI32(struct.replSrcTxnIds.size()); - for (long _iter582 : struct.replSrcTxnIds) + for (long _iter614 : struct.replSrcTxnIds) { - oprot.writeI64(_iter582); + oprot.writeI64(_iter614); } } } @@ -945,13 +945,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnRequest struc } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list583 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.replSrcTxnIds = new ArrayList(_list583.size); - long _elem584; - for (int _i585 = 0; _i585 < _list583.size; ++_i585) + org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.replSrcTxnIds = new ArrayList(_list615.size); + long _elem616; + for (int _i617 = 0; _i617 < _list615.size; ++_i617) { - _elem584 = iprot.readI64(); - struct.replSrcTxnIds.add(_elem584); + _elem616 = iprot.readI64(); + struct.replSrcTxnIds.add(_elem616); } } struct.setReplSrcTxnIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java index 71a2c4fd8064d3ea55f3df68006464eda27446a3..9c9c918d21d34d0a0e29e2927f50cce5c9aab12b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list586 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list586.size); - long _elem587; - for (int _i588 = 0; _i588 < _list586.size; ++_i588) + org.apache.thrift.protocol.TList _list618 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list618.size); + long _elem619; + for (int _i620 = 0; _i620 < _list618.size; ++_i620) { - _elem587 = iprot.readI64(); - struct.txn_ids.add(_elem587); + _elem619 = iprot.readI64(); + struct.txn_ids.add(_elem619); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter589 : struct.txn_ids) + for (long _iter621 : struct.txn_ids) { - oprot.writeI64(_iter589); + oprot.writeI64(_iter621); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter590 : struct.txn_ids) + for (long _iter622 : struct.txn_ids) { - oprot.writeI64(_iter590); + oprot.writeI64(_iter622); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st public void 
read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list591 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list591.size); - long _elem592; - for (int _i593 = 0; _i593 < _list591.size; ++_i593) + org.apache.thrift.protocol.TList _list623 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list623.size); + long _elem624; + for (int _i625 = 0; _i625 < _list623.size; ++_i625) { - _elem592 = iprot.readI64(); - struct.txn_ids.add(_elem592); + _elem624 = iprot.readI64(); + struct.txn_ids.add(_elem624); } } struct.setTxn_idsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionFilterMode.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionFilterMode.java new file mode 100644 index 0000000000000000000000000000000000000000..aaea9170f45ee2aadc3a53c3db4ba2d8fbeeb7e3 --- /dev/null +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionFilterMode.java @@ -0,0 +1,48 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + + +import java.util.Map; +import java.util.HashMap; +import org.apache.thrift.TEnum; + +public enum PartitionFilterMode implements org.apache.thrift.TEnum { + BY_NAMES(0), + BY_VALUES(1), + BY_EXPR(2); + + private final int value; + + private PartitionFilterMode(int value) { + this.value = value; + } + + /** + * Get the integer value of this enum value, as defined in the Thrift IDL. + */ + public int getValue() { + return value; + } + + /** + * Find a the enum type by its integer value, as defined in the Thrift IDL. + * @return null if the value is not found. 
+ */ + public static PartitionFilterMode findByValue(int value) { + switch (value) { + case 0: + return BY_NAMES; + case 1: + return BY_VALUES; + case 2: + return BY_EXPR; + default: + return null; + } + } +} diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java index e1f4021f017b7790d1a8521fd0d70838a2dfacf6..5667d40c9d7af5bd514af760ff6a45bcebe56c3b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionListCompos case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list268 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list268.size); - Partition _elem269; - for (int _i270 = 0; _i270 < _list268.size; ++_i270) + org.apache.thrift.protocol.TList _list300 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list300.size); + Partition _elem301; + for (int _i302 = 0; _i302 < _list300.size; ++_i302) { - _elem269 = new Partition(); - _elem269.read(iprot); - struct.partitions.add(_elem269); + _elem301 = new Partition(); + _elem301.read(iprot); + struct.partitions.add(_elem301); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionListCompo oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter271 : struct.partitions) + for (Partition _iter303 : struct.partitions) { - _iter271.write(oprot); + _iter303.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionListCompos if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter272 : struct.partitions) + for (Partition _iter304 : struct.partitions) { - _iter272.write(oprot); + _iter304.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionListComposi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list273.size); - Partition _elem274; - for (int _i275 = 0; _i275 < _list273.size; ++_i275) + org.apache.thrift.protocol.TList _list305 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list305.size); + Partition _elem306; + for (int _i307 = 0; _i307 < _list305.size; ++_i307) { - _elem274 = new Partition(); - _elem274.read(iprot); - struct.partitions.add(_elem274); + _elem306 = new Partition(); + _elem306.read(iprot); + struct.partitions.add(_elem306); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java index c3fa2a2399e5bc3e42845d8f949e0ce05f13e14c..ecee098839c5ee08d14a5f5890a9e64989387d64 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java @@ -434,14 +434,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpecWithSh case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list260 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list260.size); - PartitionWithoutSD _elem261; - for (int _i262 = 0; _i262 < _list260.size; ++_i262) + org.apache.thrift.protocol.TList _list292 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list292.size); + PartitionWithoutSD _elem293; + for (int _i294 = 0; _i294 < _list292.size; ++_i294) { - _elem261 = new PartitionWithoutSD(); - _elem261.read(iprot); - struct.partitions.add(_elem261); + _elem293 = new PartitionWithoutSD(); + _elem293.read(iprot); + struct.partitions.add(_elem293); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpecWithS oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (PartitionWithoutSD _iter263 : struct.partitions) + for (PartitionWithoutSD _iter295 : struct.partitions) { - _iter263.write(oprot); + _iter295.write(oprot); } oprot.writeListEnd(); } @@ -517,9 +517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSh if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (PartitionWithoutSD _iter264 : struct.partitions) + for (PartitionWithoutSD _iter296 : struct.partitions) { - _iter264.write(oprot); + _iter296.write(oprot); } } } @@ -534,14 +534,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSha BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list265 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list265.size); - PartitionWithoutSD _elem266; - for (int _i267 = 0; _i267 < _list265.size; ++_i267) + org.apache.thrift.protocol.TList _list297 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list297.size); + PartitionWithoutSD _elem298; + for (int _i299 = 0; _i299 < _list297.size; ++_i299) { - _elem266 = new PartitionWithoutSD(); - _elem266.read(iprot); - struct.partitions.add(_elem266); + _elem298 = new PartitionWithoutSD(); + _elem298.read(iprot); + struct.partitions.add(_elem298); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index 8309769eee0da93f8427b7fc6e02a96ae3787611..783f28c111c52312040ecbcb2f75efee20122ba2 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -1042,14 +1042,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 3: // PARTITION_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list522 = iprot.readListBegin(); - struct.partitionKeys = new ArrayList(_list522.size); - FieldSchema _elem523; - for (int _i524 = 0; _i524 < _list522.size; ++_i524) + org.apache.thrift.protocol.TList _list554 = iprot.readListBegin(); + struct.partitionKeys = new ArrayList(_list554.size); + FieldSchema _elem555; + for (int _i556 = 0; _i556 < _list554.size; ++_i556) { - _elem523 = new FieldSchema(); - _elem523.read(iprot); - struct.partitionKeys.add(_elem523); + _elem555 = new FieldSchema(); + _elem555.read(iprot); + struct.partitionKeys.add(_elem555); } iprot.readListEnd(); } @@ -1077,14 +1077,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 6: // PARTITION_ORDER if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list525 = iprot.readListBegin(); - struct.partitionOrder = new ArrayList(_list525.size); - FieldSchema _elem526; - for (int _i527 = 0; _i527 < _list525.size; ++_i527) + org.apache.thrift.protocol.TList _list557 = iprot.readListBegin(); + struct.partitionOrder = new ArrayList(_list557.size); + FieldSchema _elem558; + for (int _i559 = 0; _i559 < _list557.size; ++_i559) { - _elem526 = new FieldSchema(); - _elem526.read(iprot); - struct.partitionOrder.add(_elem526); + _elem558 = new FieldSchema(); + _elem558.read(iprot); + struct.partitionOrder.add(_elem558); } iprot.readListEnd(); } @@ -1144,9 +1144,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size())); - for (FieldSchema _iter528 : struct.partitionKeys) + for (FieldSchema _iter560 : struct.partitionKeys) { - _iter528.write(oprot); + _iter560.write(oprot); } oprot.writeListEnd(); } @@ -1169,9 +1169,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_ORDER_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionOrder.size())); - for (FieldSchema _iter529 : struct.partitionOrder) + for (FieldSchema _iter561 : struct.partitionOrder) { - _iter529.write(oprot); + _iter561.write(oprot); } oprot.writeListEnd(); } @@ -1216,9 +1216,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.partitionKeys.size()); - for (FieldSchema _iter530 : struct.partitionKeys) + for (FieldSchema _iter562 : struct.partitionKeys) { - _iter530.write(oprot); + _iter562.write(oprot); } } BitSet optionals = new BitSet(); @@ -1250,9 +1250,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetPartitionOrder()) { { oprot.writeI32(struct.partitionOrder.size()); - for (FieldSchema _iter531 : struct.partitionOrder) + for (FieldSchema _iter563 : struct.partitionOrder) { - _iter531.write(oprot); + 
_iter563.write(oprot); } } } @@ -1275,14 +1275,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list532 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionKeys = new ArrayList(_list532.size); - FieldSchema _elem533; - for (int _i534 = 0; _i534 < _list532.size; ++_i534) + org.apache.thrift.protocol.TList _list564 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionKeys = new ArrayList(_list564.size); + FieldSchema _elem565; + for (int _i566 = 0; _i566 < _list564.size; ++_i566) { - _elem533 = new FieldSchema(); - _elem533.read(iprot); - struct.partitionKeys.add(_elem533); + _elem565 = new FieldSchema(); + _elem565.read(iprot); + struct.partitionKeys.add(_elem565); } } struct.setPartitionKeysIsSet(true); @@ -1297,14 +1297,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionOrder = new ArrayList(_list535.size); - FieldSchema _elem536; - for (int _i537 = 0; _i537 < _list535.size; ++_i537) + org.apache.thrift.protocol.TList _list567 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionOrder = new ArrayList(_list567.size); + FieldSchema _elem568; + for (int _i569 = 0; _i569 < _list567.size; ++_i569) { - _elem536 = new FieldSchema(); - _elem536.read(iprot); - struct.partitionOrder.add(_elem536); + _elem568 = new FieldSchema(); + _elem568.read(iprot); + struct.partitionOrder.add(_elem568); } } struct.setPartitionOrderIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java index 7bf1c61a425750aff7c4f16b013e0ef3aed1e658..90cae6e3862e8b02729dbe002c98bfd1ecc6d0d4 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesResp case 1: // PARTITION_VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list546 = iprot.readListBegin(); - struct.partitionValues = new ArrayList(_list546.size); - PartitionValuesRow _elem547; - for (int _i548 = 0; _i548 < _list546.size; ++_i548) + org.apache.thrift.protocol.TList _list578 = iprot.readListBegin(); + struct.partitionValues = new ArrayList(_list578.size); + PartitionValuesRow _elem579; + for (int _i580 = 0; _i580 < _list578.size; ++_i580) { - _elem547 = new PartitionValuesRow(); - _elem547.read(iprot); - struct.partitionValues.add(_elem547); + _elem579 = new PartitionValuesRow(); + _elem579.read(iprot); + struct.partitionValues.add(_elem579); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRes oprot.writeFieldBegin(PARTITION_VALUES_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionValues.size())); - for (PartitionValuesRow _iter549 : struct.partitionValues) + for (PartitionValuesRow _iter581 : struct.partitionValues) { - _iter549.write(oprot); + _iter581.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitionValues.size()); - for (PartitionValuesRow _iter550 : struct.partitionValues) + for (PartitionValuesRow _iter582 : struct.partitionValues) { - _iter550.write(oprot); + _iter582.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list551 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionValues = new ArrayList(_list551.size); - PartitionValuesRow _elem552; - for (int _i553 = 0; _i553 < _list551.size; ++_i553) + org.apache.thrift.protocol.TList _list583 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionValues = new ArrayList(_list583.size); + PartitionValuesRow _elem584; + for (int _i585 = 0; _i585 < _list583.size; ++_i585) { - _elem552 = new PartitionValuesRow(); - _elem552.read(iprot); - struct.partitionValues.add(_elem552); + _elem584 = new PartitionValuesRow(); + _elem584.read(iprot); + struct.partitionValues.add(_elem584); } } struct.setPartitionValuesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java index 4c384f6408e9f1cac8dd47c2d23deab840470969..00919344c36ee59e2552a2456f86e9745e517b0a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRow case 1: // ROW if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list538 = iprot.readListBegin(); - struct.row = new ArrayList(_list538.size); - String _elem539; - for (int _i540 = 0; _i540 < _list538.size; ++_i540) + org.apache.thrift.protocol.TList _list570 = iprot.readListBegin(); + struct.row = new ArrayList(_list570.size); + String _elem571; + for (int _i572 = 0; _i572 < _list570.size; ++_i572) { - _elem539 = iprot.readString(); - struct.row.add(_elem539); + _elem571 = iprot.readString(); + struct.row.add(_elem571); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRow oprot.writeFieldBegin(ROW_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.row.size())); - for (String _iter541 : struct.row) + for (String _iter573 : struct.row) { - oprot.writeString(_iter541); + oprot.writeString(_iter573); } 
oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.row.size()); - for (String _iter542 : struct.row) + for (String _iter574 : struct.row) { - oprot.writeString(_iter542); + oprot.writeString(_iter574); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list543 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.row = new ArrayList(_list543.size); - String _elem544; - for (int _i545 = 0; _i545 < _list543.size; ++_i545) + org.apache.thrift.protocol.TList _list575 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.row = new ArrayList(_list575.size); + String _elem576; + for (int _i577 = 0; _i577 < _list575.size; ++_i577) { - _elem544 = iprot.readString(); - struct.row.add(_elem544); + _elem576 = iprot.readString(); + struct.row.add(_elem576); } } struct.setRowIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java index d43be205b21563c7cfa87bb0057aff2cd785adb0..2af59cfd3ab93516db7a8d5932797579b312444f 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java @@ -56,7 +56,7 @@ private int lastAccessTime; // required private String relativePath; // required private Map parameters; // required - private PrincipalPrivilegeSet privileges; // optional + private PrincipalPrivilegeSet privileges; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -135,7 +135,6 @@ public String getFieldName() { private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -152,7 +151,7 @@ public String getFieldName() { new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, + tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionWithoutSD.class, metaDataMap); @@ -166,7 +165,8 @@ public PartitionWithoutSD( int createTime, int lastAccessTime, String relativePath, - Map parameters) + Map parameters, + PrincipalPrivilegeSet privileges) { this(); this.values = values; @@ -176,6 +176,7 @@ public PartitionWithoutSD( setLastAccessTimeIsSet(true); this.relativePath = relativePath; this.parameters = parameters; + this.privileges = privileges; } /** @@ -705,16 +706,14 @@ public String toString() { sb.append(this.parameters); } first = false; - if (isSetPrivileges()) { - if (!first) sb.append(", "); - sb.append("privileges:"); - if (this.privileges == null) { - sb.append("null"); - } else { - sb.append(this.privileges); - } - first = false; + if (!first) sb.append(", "); + sb.append("privileges:"); + if (this.privileges == null) { + sb.append("null"); + } else { + sb.append(this.privileges); } + first = false; sb.append(")"); return sb.toString(); } @@ -766,13 +765,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list242 = iprot.readListBegin(); - struct.values = new ArrayList(_list242.size); - String _elem243; - for (int _i244 = 0; _i244 < _list242.size; ++_i244) + org.apache.thrift.protocol.TList _list274 = iprot.readListBegin(); + struct.values = new ArrayList(_list274.size); + String _elem275; + for (int _i276 = 0; _i276 < _list274.size; ++_i276) { - _elem243 = iprot.readString(); - struct.values.add(_elem243); + _elem275 = iprot.readString(); + struct.values.add(_elem275); } iprot.readListEnd(); } @@ -808,15 +807,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD case 5: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map245 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map245.size); - String _key246; - String _val247; - for (int _i248 = 0; _i248 < _map245.size; ++_i248) + org.apache.thrift.protocol.TMap _map277 = iprot.readMapBegin(); + struct.parameters = 
new HashMap(2*_map277.size); + String _key278; + String _val279; + for (int _i280 = 0; _i280 < _map277.size; ++_i280) { - _key246 = iprot.readString(); - _val247 = iprot.readString(); - struct.parameters.put(_key246, _val247); + _key278 = iprot.readString(); + _val279 = iprot.readString(); + struct.parameters.put(_key278, _val279); } iprot.readMapEnd(); } @@ -851,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (String _iter249 : struct.values) + for (String _iter281 : struct.values) { - oprot.writeString(_iter249); + oprot.writeString(_iter281); } oprot.writeListEnd(); } @@ -874,21 +873,19 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter250 : struct.parameters.entrySet()) + for (Map.Entry _iter282 : struct.parameters.entrySet()) { - oprot.writeString(_iter250.getKey()); - oprot.writeString(_iter250.getValue()); + oprot.writeString(_iter282.getKey()); + oprot.writeString(_iter282.getValue()); } oprot.writeMapEnd(); } oprot.writeFieldEnd(); } if (struct.privileges != null) { - if (struct.isSetPrivileges()) { - oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC); - struct.privileges.write(oprot); - oprot.writeFieldEnd(); - } + oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC); + struct.privileges.write(oprot); + oprot.writeFieldEnd(); } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -930,9 +927,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); - for (String _iter251 : struct.values) + for (String _iter283 : struct.values) { - oprot.writeString(_iter251); + oprot.writeString(_iter283); } } } @@ -948,10 +945,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter252 : struct.parameters.entrySet()) + for (Map.Entry _iter284 : struct.parameters.entrySet()) { - oprot.writeString(_iter252.getKey()); - oprot.writeString(_iter252.getValue()); + oprot.writeString(_iter284.getKey()); + oprot.writeString(_iter284.getValue()); } } } @@ -966,13 +963,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD s BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list253.size); - String _elem254; - for (int _i255 = 0; _i255 < _list253.size; ++_i255) + org.apache.thrift.protocol.TList _list285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.values = new ArrayList(_list285.size); + String _elem286; + for (int _i287 = 0; _i287 < _list285.size; ++_i287) { - _elem254 = iprot.readString(); - struct.values.add(_elem254); + _elem286 = iprot.readString(); + struct.values.add(_elem286); } } struct.setValuesIsSet(true); @@ -991,15 +988,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD s } if (incoming.get(4)) { { - 
org.apache.thrift.protocol.TMap _map256 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map256.size); - String _key257; - String _val258; - for (int _i259 = 0; _i259 < _map256.size; ++_i259) + org.apache.thrift.protocol.TMap _map288 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map288.size); + String _key289; + String _val290; + for (int _i291 = 0; _i291 < _map288.size; ++_i291) { - _key257 = iprot.readString(); - _val258 = iprot.readString(); - struct.parameters.put(_key257, _val258); + _key289 = iprot.readString(); + _val290 = iprot.readString(); + struct.parameters.put(_key289, _val290); } } struct.setParametersIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java index 27cdac609ad975f5f07bdac9e0b262d6e4142bb0..6d2a8ae0878fc75ef13c7f0d78b39af80817dfe6 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java @@ -439,14 +439,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRes case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list424 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list424.size); - Partition _elem425; - for (int _i426 = 0; _i426 < _list424.size; ++_i426) + org.apache.thrift.protocol.TList _list456 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list456.size); + Partition _elem457; + for (int _i458 = 0; _i458 < _list456.size; ++_i458) { - _elem425 = new Partition(); - _elem425.read(iprot); - struct.partitions.add(_elem425); + _elem457 = new Partition(); + _elem457.read(iprot); + struct.partitions.add(_elem457); } iprot.readListEnd(); } @@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter427 : struct.partitions) + for (Partition _iter459 : struct.partitions) { - _iter427.write(oprot); + _iter459.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitions.size()); - for (Partition _iter428 : struct.partitions) + for (Partition _iter460 : struct.partitions) { - _iter428.write(oprot); + _iter460.write(oprot); } } oprot.writeBool(struct.hasUnknownPartitions); @@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list429 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list429.size); - Partition _elem430; - for (int _i431 = 0; _i431 < _list429.size; ++_i431) + org.apache.thrift.protocol.TList _list461 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list461.size); + Partition _elem462; + for (int _i463 = 0; _i463 < _list461.size; ++_i463) { - _elem430 = new Partition(); - _elem430.read(iprot); - struct.partitions.add(_elem430); + _elem462 = new Partition(); + _elem462.read(iprot); + struct.partitions.add(_elem462); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 7e0842072f54441bdcfd7127bd1b989b5c285cd0..682fac902f419e1f381c82dc3c183b8e0721dc73 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -802,13 +802,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list466 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list466.size); - String _elem467; - for (int _i468 = 0; _i468 < _list466.size; ++_i468) + org.apache.thrift.protocol.TList _list498 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list498.size); + String _elem499; + for (int _i500 = 0; _i500 < _list498.size; ++_i500) { - _elem467 = iprot.readString(); - struct.colNames.add(_elem467); + _elem499 = iprot.readString(); + struct.colNames.add(_elem499); } iprot.readListEnd(); } @@ -820,13 +820,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 4: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list469 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list469.size); - String _elem470; - for (int _i471 = 0; _i471 < _list469.size; ++_i471) + org.apache.thrift.protocol.TList _list501 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list501.size); + String _elem502; + for (int _i503 = 0; _i503 < _list501.size; ++_i503) { - _elem470 = iprot.readString(); - struct.partNames.add(_elem470); + _elem502 = iprot.readString(); + struct.partNames.add(_elem502); } iprot.readListEnd(); } @@ -878,9 +878,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter472 : struct.colNames) + for (String _iter504 : struct.colNames) { - oprot.writeString(_iter472); + oprot.writeString(_iter504); } oprot.writeListEnd(); } @@ -890,9 +890,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String 
_iter473 : struct.partNames) + for (String _iter505 : struct.partNames) { - oprot.writeString(_iter473); + oprot.writeString(_iter505); } oprot.writeListEnd(); } @@ -933,16 +933,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter474 : struct.colNames) + for (String _iter506 : struct.colNames) { - oprot.writeString(_iter474); + oprot.writeString(_iter506); } } { oprot.writeI32(struct.partNames.size()); - for (String _iter475 : struct.partNames) + for (String _iter507 : struct.partNames) { - oprot.writeString(_iter475); + oprot.writeString(_iter507); } } BitSet optionals = new BitSet(); @@ -969,24 +969,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list476 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list476.size); - String _elem477; - for (int _i478 = 0; _i478 < _list476.size; ++_i478) + org.apache.thrift.protocol.TList _list508 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list508.size); + String _elem509; + for (int _i510 = 0; _i510 < _list508.size; ++_i510) { - _elem477 = iprot.readString(); - struct.colNames.add(_elem477); + _elem509 = iprot.readString(); + struct.colNames.add(_elem509); } } struct.setColNamesIsSet(true); { - org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list479.size); - String _elem480; - for (int _i481 = 0; _i481 < _list479.size; ++_i481) + org.apache.thrift.protocol.TList _list511 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list511.size); + String _elem512; + for (int _i513 = 0; _i513 < _list511.size; ++_i513) { - _elem480 = iprot.readString(); - struct.partNames.add(_elem480); + _elem512 = iprot.readString(); + struct.partNames.add(_elem512); } } struct.setPartNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index becfcc904dc0cadb81b7a93b613d074e50c3d67e..4b361a7778d0ca1b28ec60519fbe984926153554 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -444,26 +444,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu case 1: // PART_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map440 = iprot.readMapBegin(); - struct.partStats = new HashMap>(2*_map440.size); - String _key441; - List _val442; - for (int _i443 = 0; _i443 < _map440.size; ++_i443) + org.apache.thrift.protocol.TMap _map472 = iprot.readMapBegin(); + struct.partStats = new HashMap>(2*_map472.size); + String _key473; + List _val474; + for (int _i475 = 0; _i475 < _map472.size; ++_i475) { - _key441 = 
iprot.readString(); + _key473 = iprot.readString(); { - org.apache.thrift.protocol.TList _list444 = iprot.readListBegin(); - _val442 = new ArrayList(_list444.size); - ColumnStatisticsObj _elem445; - for (int _i446 = 0; _i446 < _list444.size; ++_i446) + org.apache.thrift.protocol.TList _list476 = iprot.readListBegin(); + _val474 = new ArrayList(_list476.size); + ColumnStatisticsObj _elem477; + for (int _i478 = 0; _i478 < _list476.size; ++_i478) { - _elem445 = new ColumnStatisticsObj(); - _elem445.read(iprot); - _val442.add(_elem445); + _elem477 = new ColumnStatisticsObj(); + _elem477.read(iprot); + _val474.add(_elem477); } iprot.readListEnd(); } - struct.partStats.put(_key441, _val442); + struct.partStats.put(_key473, _val474); } iprot.readMapEnd(); } @@ -497,14 +497,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes oprot.writeFieldBegin(PART_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size())); - for (Map.Entry> _iter447 : struct.partStats.entrySet()) + for (Map.Entry> _iter479 : struct.partStats.entrySet()) { - oprot.writeString(_iter447.getKey()); + oprot.writeString(_iter479.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter447.getValue().size())); - for (ColumnStatisticsObj _iter448 : _iter447.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter479.getValue().size())); + for (ColumnStatisticsObj _iter480 : _iter479.getValue()) { - _iter448.write(oprot); + _iter480.write(oprot); } oprot.writeListEnd(); } @@ -537,14 +537,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partStats.size()); - for (Map.Entry> _iter449 : struct.partStats.entrySet()) + for (Map.Entry> _iter481 : struct.partStats.entrySet()) { - oprot.writeString(_iter449.getKey()); + oprot.writeString(_iter481.getKey()); { - oprot.writeI32(_iter449.getValue().size()); - for (ColumnStatisticsObj _iter450 : _iter449.getValue()) + oprot.writeI32(_iter481.getValue().size()); + for (ColumnStatisticsObj _iter482 : _iter481.getValue()) { - _iter450.write(oprot); + _iter482.write(oprot); } } } @@ -563,25 +563,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map451 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.partStats = new HashMap>(2*_map451.size); - String _key452; - List _val453; - for (int _i454 = 0; _i454 < _map451.size; ++_i454) + org.apache.thrift.protocol.TMap _map483 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.partStats = new HashMap>(2*_map483.size); + String _key484; + List _val485; + for (int _i486 = 0; _i486 < _map483.size; ++_i486) { - _key452 = iprot.readString(); + _key484 = iprot.readString(); { - org.apache.thrift.protocol.TList _list455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val453 = new 
ArrayList(_list455.size); - ColumnStatisticsObj _elem456; - for (int _i457 = 0; _i457 < _list455.size; ++_i457) + org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val485 = new ArrayList(_list487.size); + ColumnStatisticsObj _elem488; + for (int _i489 = 0; _i489 < _list487.size; ++_i489) { - _elem456 = new ColumnStatisticsObj(); - _elem456.read(iprot); - _val453.add(_elem456); + _elem488 = new ColumnStatisticsObj(); + _elem488.read(iprot); + _val485.add(_elem488); } } - struct.partStats.put(_key452, _val453); + struct.partStats.put(_key484, _val485); } } struct.setPartStatsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java index db265ea4f426ff56b78f9f0081675d1115de3839..13d127e37dfc0e2eae87e40015365272beeb8d04 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysResponse case 1: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list328 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list328.size); - SQLPrimaryKey _elem329; - for (int _i330 = 0; _i330 < _list328.size; ++_i330) + org.apache.thrift.protocol.TList _list360 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list360.size); + SQLPrimaryKey _elem361; + for (int _i362 = 0; _i362 < _list360.size; ++_i362) { - _elem329 = new SQLPrimaryKey(); - _elem329.read(iprot); - struct.primaryKeys.add(_elem329); + _elem361 = new SQLPrimaryKey(); + _elem361.read(iprot); + struct.primaryKeys.add(_elem361); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysRespons oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter331 : struct.primaryKeys) + for (SQLPrimaryKey _iter363 : struct.primaryKeys) { - _iter331.write(oprot); + _iter363.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter332 : struct.primaryKeys) + for (SQLPrimaryKey _iter364 : struct.primaryKeys) { - _iter332.write(oprot); + _iter364.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list333.size); - SQLPrimaryKey _elem334; - for (int _i335 = 0; _i335 < _list333.size; ++_i335) + org.apache.thrift.protocol.TList _list365 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list365.size); + SQLPrimaryKey _elem366; + for (int _i367 = 0; _i367 < _list365.size; ++_i367) { - _elem334 = new SQLPrimaryKey(); - _elem334.read(iprot); - struct.primaryKeys.add(_elem334); + _elem366 = new SQLPrimaryKey(); + _elem366.read(iprot); + struct.primaryKeys.add(_elem366); } } struct.setPrimaryKeysIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index e19034c630d692613007d052b9134c012925064e..8518e441825f32da161ca704aa4040865ce45ff2 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list816 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list816.size); - long _elem817; - for (int _i818 = 0; _i818 < _list816.size; ++_i818) + org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list848.size); + long _elem849; + for (int _i850 = 0; _i850 < _list848.size; ++_i850) { - _elem817 = iprot.readI64(); - struct.fileIds.add(_elem817); + _elem849 = iprot.readI64(); + struct.fileIds.add(_elem849); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list819 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list819.size); - ByteBuffer _elem820; - for (int _i821 = 0; _i821 < _list819.size; ++_i821) + org.apache.thrift.protocol.TList _list851 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list851.size); + ByteBuffer _elem852; + for (int _i853 = 0; _i853 < _list851.size; ++_i853) { - _elem820 = iprot.readBinary(); - struct.metadata.add(_elem820); + _elem852 = iprot.readBinary(); + struct.metadata.add(_elem852); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter822 : struct.fileIds) + for (long _iter854 : struct.fileIds) { - oprot.writeI64(_iter822); + oprot.writeI64(_iter854); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter823 : struct.metadata) + for (ByteBuffer _iter855 : struct.metadata) { - oprot.writeBinary(_iter823); + oprot.writeBinary(_iter855); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol 
oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter824 : struct.fileIds) + for (long _iter856 : struct.fileIds) { - oprot.writeI64(_iter824); + oprot.writeI64(_iter856); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter825 : struct.metadata) + for (ByteBuffer _iter857 : struct.metadata) { - oprot.writeBinary(_iter825); + oprot.writeBinary(_iter857); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list826 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list826.size); - long _elem827; - for (int _i828 = 0; _i828 < _list826.size; ++_i828) + org.apache.thrift.protocol.TList _list858 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list858.size); + long _elem859; + for (int _i860 = 0; _i860 < _list858.size; ++_i860) { - _elem827 = iprot.readI64(); - struct.fileIds.add(_elem827); + _elem859 = iprot.readI64(); + struct.fileIds.add(_elem859); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list829 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list829.size); - ByteBuffer _elem830; - for (int _i831 = 0; _i831 < _list829.size; ++_i831) + org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list861.size); + ByteBuffer _elem862; + for (int _i863 = 0; _i863 < _list861.size; ++_i863) { - _elem830 = iprot.readBinary(); - struct.metadata.add(_elem830); + _elem862 = iprot.readBinary(); + struct.metadata.add(_elem862); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java index 3540e993361b83d7ae299012aa73b82561339753..6851625eb4d573cbdd0bd1edfbaa03a203f67946 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java @@ -796,13 +796,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RenamePartitionRequ case 4: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); - struct.partVals = new ArrayList(_list960.size); - String _elem961; - for (int _i962 = 0; _i962 < _list960.size; ++_i962) + org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); + struct.partVals = new ArrayList(_list992.size); + String _elem993; + for (int _i994 = 0; _i994 < _list992.size; ++_i994) { - _elem961 = iprot.readString(); - struct.partVals.add(_elem961); + _elem993 = iprot.readString(); + struct.partVals.add(_elem993); } iprot.readListEnd(); } @@ -862,9 +862,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, RenamePartitionReq oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (String _iter963 : struct.partVals) + for (String _iter995 : struct.partVals) { - oprot.writeString(_iter963); + oprot.writeString(_iter995); } oprot.writeListEnd(); } @@ -903,9 +903,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RenamePartitionRequ oprot.writeString(struct.tableName); { oprot.writeI32(struct.partVals.size()); - for (String _iter964 : struct.partVals) + for (String _iter996 : struct.partVals) { - oprot.writeString(_iter964); + oprot.writeString(_iter996); } } struct.newPart.write(oprot); @@ -933,13 +933,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RenamePartitionReque struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); { - org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partVals = new ArrayList(_list965.size); - String _elem966; - for (int _i967 = 0; _i967 < _list965.size; ++_i967) + org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partVals = new ArrayList(_list997.size); + String _elem998; + for (int _i999 = 0; _i999 < _list997.size; ++_i999) { - _elem966 = iprot.readString(); - struct.partVals.add(_elem966); + _elem998 = iprot.readString(); + struct.partVals.add(_elem998); } } struct.setPartValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java index f637d519f44500cab25d071416c272a1cf5c4f6a..2db006061421b991a176afebbafe87e228c19c4d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java @@ -813,13 +813,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ReplTblWriteIdState case 6: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list610 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list610.size); - String _elem611; - for (int _i612 = 0; _i612 < _list610.size; ++_i612) + org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list642.size); + String _elem643; + for (int _i644 = 0; _i644 < _list642.size; ++_i644) { - _elem611 = iprot.readString(); - struct.partNames.add(_elem611); + _elem643 = iprot.readString(); + struct.partNames.add(_elem643); } iprot.readListEnd(); } @@ -871,9 +871,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ReplTblWriteIdStat oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter613 : struct.partNames) + for (String _iter645 : struct.partNames) { - oprot.writeString(_iter613); + oprot.writeString(_iter645); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdState if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter614 : struct.partNames) + for (String _iter646 : struct.partNames) { - oprot.writeString(_iter614); + oprot.writeString(_iter646); } } } @@ -934,13 +934,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateR BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list615.size); - String _elem616; - for (int _i617 = 0; _i617 < _list615.size; ++_i617) + org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list647.size); + String _elem648; + for (int _i649 = 0; _i649 < _list647.size; ++_i649) { - _elem616 = iprot.readString(); - struct.partNames.add(_elem616); + _elem648 = iprot.readString(); + struct.partNames.add(_elem648); } } struct.setPartNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java index 19de923dcb62594f1154b561c26a310f49688eef..82ee11d169c2e7ae3962d7fcfd431397ddd4ab2d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java @@ -168,13 +168,13 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == NAMES_FIELD_DESC.type) { List names; { - org.apache.thrift.protocol.TList _list506 = iprot.readListBegin(); - names = new ArrayList(_list506.size); - String _elem507; - for (int _i508 = 0; _i508 < _list506.size; ++_i508) + org.apache.thrift.protocol.TList _list538 = iprot.readListBegin(); + names = new ArrayList(_list538.size); + String _elem539; + for (int _i540 = 0; _i540 < _list538.size; ++_i540) { - _elem507 = iprot.readString(); - names.add(_elem507); + _elem539 = iprot.readString(); + names.add(_elem539); } iprot.readListEnd(); } @@ -187,14 +187,14 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == EXPRS_FIELD_DESC.type) { List exprs; { - org.apache.thrift.protocol.TList _list509 = iprot.readListBegin(); - exprs = new ArrayList(_list509.size); - DropPartitionsExpr _elem510; - for (int _i511 = 0; _i511 < _list509.size; ++_i511) + org.apache.thrift.protocol.TList _list541 = iprot.readListBegin(); + exprs = new ArrayList(_list541.size); + DropPartitionsExpr _elem542; + for (int _i543 = 0; _i543 < _list541.size; ++_i543) { - _elem510 = new DropPartitionsExpr(); - _elem510.read(iprot); - exprs.add(_elem510); + _elem542 = new DropPartitionsExpr(); + _elem542.read(iprot); + exprs.add(_elem542); } iprot.readListEnd(); } @@ -219,9 +219,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter512 : names) + for (String _iter544 : names) { - oprot.writeString(_iter512); + 
oprot.writeString(_iter544); } oprot.writeListEnd(); } @@ -230,9 +230,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter513 : exprs) + for (DropPartitionsExpr _iter545 : exprs) { - _iter513.write(oprot); + _iter545.write(oprot); } oprot.writeListEnd(); } @@ -250,13 +250,13 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case NAMES: List names; { - org.apache.thrift.protocol.TList _list514 = iprot.readListBegin(); - names = new ArrayList(_list514.size); - String _elem515; - for (int _i516 = 0; _i516 < _list514.size; ++_i516) + org.apache.thrift.protocol.TList _list546 = iprot.readListBegin(); + names = new ArrayList(_list546.size); + String _elem547; + for (int _i548 = 0; _i548 < _list546.size; ++_i548) { - _elem515 = iprot.readString(); - names.add(_elem515); + _elem547 = iprot.readString(); + names.add(_elem547); } iprot.readListEnd(); } @@ -264,14 +264,14 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case EXPRS: List exprs; { - org.apache.thrift.protocol.TList _list517 = iprot.readListBegin(); - exprs = new ArrayList(_list517.size); - DropPartitionsExpr _elem518; - for (int _i519 = 0; _i519 < _list517.size; ++_i519) + org.apache.thrift.protocol.TList _list549 = iprot.readListBegin(); + exprs = new ArrayList(_list549.size); + DropPartitionsExpr _elem550; + for (int _i551 = 0; _i551 < _list549.size; ++_i551) { - _elem518 = new DropPartitionsExpr(); - _elem518.read(iprot); - exprs.add(_elem518); + _elem550 = new DropPartitionsExpr(); + _elem550.read(iprot); + exprs.add(_elem550); } iprot.readListEnd(); } @@ -291,9 +291,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter520 : names) + for (String _iter552 : names) { - oprot.writeString(_iter520); + oprot.writeString(_iter552); } oprot.writeListEnd(); } @@ -302,9 +302,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter521 : exprs) + for (DropPartitionsExpr _iter553 : exprs) { - _iter521.write(oprot); + _iter553.write(oprot); } oprot.writeListEnd(); } diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java index b4cd16f8cf08f587f7890c48aea2a52077fdc610..59eabc89664b294c02b2b143d064d2251d567e47 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java @@ -445,14 +445,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 1: // FIELD_SCHEMAS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list300 = iprot.readListBegin(); - struct.fieldSchemas = new ArrayList(_list300.size); - FieldSchema _elem301; - for (int 
_i302 = 0; _i302 < _list300.size; ++_i302) + org.apache.thrift.protocol.TList _list332 = iprot.readListBegin(); + struct.fieldSchemas = new ArrayList(_list332.size); + FieldSchema _elem333; + for (int _i334 = 0; _i334 < _list332.size; ++_i334) { - _elem301 = new FieldSchema(); - _elem301.read(iprot); - struct.fieldSchemas.add(_elem301); + _elem333 = new FieldSchema(); + _elem333.read(iprot); + struct.fieldSchemas.add(_elem333); } iprot.readListEnd(); } @@ -464,15 +464,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 2: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map303 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map303.size); - String _key304; - String _val305; - for (int _i306 = 0; _i306 < _map303.size; ++_i306) + org.apache.thrift.protocol.TMap _map335 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map335.size); + String _key336; + String _val337; + for (int _i338 = 0; _i338 < _map335.size; ++_i338) { - _key304 = iprot.readString(); - _val305 = iprot.readString(); - struct.properties.put(_key304, _val305); + _key336 = iprot.readString(); + _val337 = iprot.readString(); + struct.properties.put(_key336, _val337); } iprot.readMapEnd(); } @@ -498,9 +498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(FIELD_SCHEMAS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fieldSchemas.size())); - for (FieldSchema _iter307 : struct.fieldSchemas) + for (FieldSchema _iter339 : struct.fieldSchemas) { - _iter307.write(oprot); + _iter339.write(oprot); } oprot.writeListEnd(); } @@ -510,10 +510,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter308 : struct.properties.entrySet()) + for (Map.Entry _iter340 : struct.properties.entrySet()) { - oprot.writeString(_iter308.getKey()); - oprot.writeString(_iter308.getValue()); + oprot.writeString(_iter340.getKey()); + oprot.writeString(_iter340.getValue()); } oprot.writeMapEnd(); } @@ -547,19 +547,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Schema struct) thro if (struct.isSetFieldSchemas()) { { oprot.writeI32(struct.fieldSchemas.size()); - for (FieldSchema _iter309 : struct.fieldSchemas) + for (FieldSchema _iter341 : struct.fieldSchemas) { - _iter309.write(oprot); + _iter341.write(oprot); } } } if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter310 : struct.properties.entrySet()) + for (Map.Entry _iter342 : struct.properties.entrySet()) { - oprot.writeString(_iter310.getKey()); - oprot.writeString(_iter310.getValue()); + oprot.writeString(_iter342.getKey()); + oprot.writeString(_iter342.getValue()); } } } @@ -571,29 +571,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Schema struct) throw BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.fieldSchemas = new ArrayList(_list311.size); - FieldSchema _elem312; - for (int _i313 = 0; _i313 < _list311.size; ++_i313) + 
org.apache.thrift.protocol.TList _list343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.fieldSchemas = new ArrayList(_list343.size); + FieldSchema _elem344; + for (int _i345 = 0; _i345 < _list343.size; ++_i345) { - _elem312 = new FieldSchema(); - _elem312.read(iprot); - struct.fieldSchemas.add(_elem312); + _elem344 = new FieldSchema(); + _elem344.read(iprot); + struct.fieldSchemas.add(_elem344); } } struct.setFieldSchemasIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map314 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map314.size); - String _key315; - String _val316; - for (int _i317 = 0; _i317 < _map314.size; ++_i317) + org.apache.thrift.protocol.TMap _map346 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map346.size); + String _key347; + String _val348; + for (int _i349 = 0; _i349 < _map346.size; ++_i349) { - _key315 = iprot.readString(); - _val316 = iprot.readString(); - struct.properties.put(_key315, _val316); + _key347 = iprot.readString(); + _val348 = iprot.readString(); + struct.properties.put(_key347, _val348); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java index 88d7e3fedf59e6204ab133ce3d67828aa9688a94..1d7677cb43b4e4a152d0c98a48abacd864cf5b0e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java @@ -1119,14 +1119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struc case 4: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); - struct.cols = new ArrayList(_list936.size); - FieldSchema _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); + struct.cols = new ArrayList(_list968.size); + FieldSchema _elem969; + for (int _i970 = 0; _i970 < _list968.size; ++_i970) { - _elem937 = new FieldSchema(); - _elem937.read(iprot); - struct.cols.add(_elem937); + _elem969 = new FieldSchema(); + _elem969.read(iprot); + struct.cols.add(_elem969); } iprot.readListEnd(); } @@ -1212,9 +1212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion stru oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter939 : struct.cols) + for (FieldSchema _iter971 : struct.cols) { - _iter939.write(oprot); + _iter971.write(oprot); } oprot.writeListEnd(); } @@ -1323,9 +1323,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struc if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter940 : struct.cols) + for (FieldSchema _iter972 : struct.cols) { - _iter940.write(oprot); + _iter972.write(oprot); } } } @@ -1368,14 +1368,14 
@@ public void read(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list941.size); - FieldSchema _elem942; - for (int _i943 = 0; _i943 < _list941.size; ++_i943) + org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list973.size); + FieldSchema _elem974; + for (int _i975 = 0; _i975 < _list973.size; ++_i975) { - _elem942 = new FieldSchema(); - _elem942.read(iprot); - struct.cols.add(_elem942); + _elem974 = new FieldSchema(); + _elem974.read(iprot); + struct.cols.add(_elem974); } } struct.setColsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index 6f9053d8ab0e82ef78ebb3a646a72d10fa7d222b..621820f84c18520d63ca14230320b74e85c6edb0 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -594,14 +594,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR case 1: // COL_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list292 = iprot.readListBegin(); - struct.colStats = new ArrayList(_list292.size); - ColumnStatistics _elem293; - for (int _i294 = 0; _i294 < _list292.size; ++_i294) + org.apache.thrift.protocol.TList _list324 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list324.size); + ColumnStatistics _elem325; + for (int _i326 = 0; _i326 < _list324.size; ++_i326) { - _elem293 = new ColumnStatistics(); - _elem293.read(iprot); - struct.colStats.add(_elem293); + _elem325 = new ColumnStatistics(); + _elem325.read(iprot); + struct.colStats.add(_elem325); } iprot.readListEnd(); } @@ -651,9 +651,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats oprot.writeFieldBegin(COL_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); - for (ColumnStatistics _iter295 : struct.colStats) + for (ColumnStatistics _iter327 : struct.colStats) { - _iter295.write(oprot); + _iter327.write(oprot); } oprot.writeListEnd(); } @@ -695,9 +695,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.colStats.size()); - for (ColumnStatistics _iter296 : struct.colStats) + for (ColumnStatistics _iter328 : struct.colStats) { - _iter296.write(oprot); + _iter328.write(oprot); } } BitSet optionals = new BitSet(); @@ -726,14 +726,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list297 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.colStats = new ArrayList(_list297.size); - ColumnStatistics _elem298; - for (int _i299 = 0; _i299 < _list297.size; ++_i299) + org.apache.thrift.protocol.TList _list329 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list329.size); + ColumnStatistics _elem330; + for (int _i331 = 0; _i331 < _list329.size; ++_i331) { - _elem298 = new ColumnStatistics(); - _elem298.read(iprot); - struct.colStats.add(_elem298); + _elem330 = new ColumnStatistics(); + _elem330.read(iprot); + struct.colStats.add(_elem330); } } struct.setColStatsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index c58885f22a7bbfb9e75f650120b850883e888e4e..0d64491dc936becaf8a29881edc23bf096058123 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list708 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list708.size); - ShowCompactResponseElement _elem709; - for (int _i710 = 0; _i710 < _list708.size; ++_i710) + org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list740.size); + ShowCompactResponseElement _elem741; + for (int _i742 = 0; _i742 < _list740.size; ++_i742) { - _elem709 = new ShowCompactResponseElement(); - _elem709.read(iprot); - struct.compacts.add(_elem709); + _elem741 = new ShowCompactResponseElement(); + _elem741.read(iprot); + struct.compacts.add(_elem741); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter711 : struct.compacts) + for (ShowCompactResponseElement _iter743 : struct.compacts) { - _iter711.write(oprot); + _iter743.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter712 : struct.compacts) + for (ShowCompactResponseElement _iter744 : struct.compacts) { - _iter712.write(oprot); + _iter744.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list713.size); - ShowCompactResponseElement _elem714; - for (int _i715 = 0; _i715 < _list713.size; ++_i715) + 
org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list745.size); + ShowCompactResponseElement _elem746; + for (int _i747 = 0; _i747 < _list745.size; ++_i747) { - _elem714 = new ShowCompactResponseElement(); - _elem714.read(iprot); - struct.compacts.add(_elem714); + _elem746 = new ShowCompactResponseElement(); + _elem746.read(iprot); + struct.compacts.add(_elem746); } } struct.setCompactsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index 4bbc8e7766922455bd4ad960c49599146a2e84c6..c462263c2ea46dd5f848502f3b2e4235d7de41ed 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); - struct.locks = new ArrayList(_list674.size); - ShowLocksResponseElement _elem675; - for (int _i676 = 0; _i676 < _list674.size; ++_i676) + org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); + struct.locks = new ArrayList(_list706.size); + ShowLocksResponseElement _elem707; + for (int _i708 = 0; _i708 < _list706.size; ++_i708) { - _elem675 = new ShowLocksResponseElement(); - _elem675.read(iprot); - struct.locks.add(_elem675); + _elem707 = new ShowLocksResponseElement(); + _elem707.read(iprot); + struct.locks.add(_elem707); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter677 : struct.locks) + for (ShowLocksResponseElement _iter709 : struct.locks) { - _iter677.write(oprot); + _iter709.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter678 : struct.locks) + for (ShowLocksResponseElement _iter710 : struct.locks) { - _iter678.write(oprot); + _iter710.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list679.size); - ShowLocksResponseElement _elem680; - for (int _i681 = 0; _i681 < _list679.size; ++_i681) + org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list711.size); + ShowLocksResponseElement _elem712; + for (int _i713 = 0; _i713 < _list711.size; ++_i713) { - _elem680 = new ShowLocksResponseElement(); - 
_elem680.read(iprot); - struct.locks.add(_elem680); + _elem712 = new ShowLocksResponseElement(); + _elem712.read(iprot); + struct.locks.add(_elem712); } } struct.setLocksIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index ae3a6e24831ac2e37592472d383ddbe9658702fc..c50ea641c5448a903e50fd8a2bce6254596136b2 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -700,13 +700,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list458 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list458.size); - String _elem459; - for (int _i460 = 0; _i460 < _list458.size; ++_i460) + org.apache.thrift.protocol.TList _list490 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list490.size); + String _elem491; + for (int _i492 = 0; _i492 < _list490.size; ++_i492) { - _elem459 = iprot.readString(); - struct.colNames.add(_elem459); + _elem491 = iprot.readString(); + struct.colNames.add(_elem491); } iprot.readListEnd(); } @@ -758,9 +758,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter461 : struct.colNames) + for (String _iter493 : struct.colNames) { - oprot.writeString(_iter461); + oprot.writeString(_iter493); } oprot.writeListEnd(); } @@ -801,9 +801,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter462 : struct.colNames) + for (String _iter494 : struct.colNames) { - oprot.writeString(_iter462); + oprot.writeString(_iter494); } } BitSet optionals = new BitSet(); @@ -830,13 +830,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list463.size); - String _elem464; - for (int _i465 = 0; _i465 < _list463.size; ++_i465) + org.apache.thrift.protocol.TList _list495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list495.size); + String _elem496; + for (int _i497 = 0; _i497 < _list495.size; ++_i497) { - _elem464 = iprot.readString(); - struct.colNames.add(_elem464); + _elem496 = iprot.readString(); + struct.colNames.add(_elem496); } } struct.setColNamesIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index 
b57f4bf6b2cea166e8b462ab06e4bd564b425ad4..2d9a5c1d116ec89c688d54f55c7f15db169e7e3a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -435,14 +435,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st case 1: // TABLE_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list432 = iprot.readListBegin(); - struct.tableStats = new ArrayList(_list432.size); - ColumnStatisticsObj _elem433; - for (int _i434 = 0; _i434 < _list432.size; ++_i434) + org.apache.thrift.protocol.TList _list464 = iprot.readListBegin(); + struct.tableStats = new ArrayList(_list464.size); + ColumnStatisticsObj _elem465; + for (int _i466 = 0; _i466 < _list464.size; ++_i466) { - _elem433 = new ColumnStatisticsObj(); - _elem433.read(iprot); - struct.tableStats.add(_elem433); + _elem465 = new ColumnStatisticsObj(); + _elem465.read(iprot); + struct.tableStats.add(_elem465); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableStats.size())); - for (ColumnStatisticsObj _iter435 : struct.tableStats) + for (ColumnStatisticsObj _iter467 : struct.tableStats) { - _iter435.write(oprot); + _iter467.write(oprot); } oprot.writeListEnd(); } @@ -508,9 +508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tableStats.size()); - for (ColumnStatisticsObj _iter436 : struct.tableStats) + for (ColumnStatisticsObj _iter468 : struct.tableStats) { - _iter436.write(oprot); + _iter468.write(oprot); } } BitSet optionals = new BitSet(); @@ -527,14 +527,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list437 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tableStats = new ArrayList(_list437.size); - ColumnStatisticsObj _elem438; - for (int _i439 = 0; _i439 < _list437.size; ++_i439) + org.apache.thrift.protocol.TList _list469 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableStats = new ArrayList(_list469.size); + ColumnStatisticsObj _elem470; + for (int _i471 = 0; _i471 < _list469.size; ++_i471) { - _elem438 = new ColumnStatisticsObj(); - _elem438.read(iprot); - struct.tableStats.add(_elem438); + _elem470 = new ColumnStatisticsObj(); + _elem470.read(iprot); + struct.tableStats.add(_elem470); } } struct.setTableStatsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java index 9c6ee51b26d62a06dcac63547a51106fbe4da760..d0ab3b52e9057f56de760ba875a96a3d9155cd9c 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java @@ -708,13 +708,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableValidWriteIds case 3: // INVALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list626 = iprot.readListBegin(); - struct.invalidWriteIds = new ArrayList(_list626.size); - long _elem627; - for (int _i628 = 0; _i628 < _list626.size; ++_i628) + org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); + struct.invalidWriteIds = new ArrayList(_list658.size); + long _elem659; + for (int _i660 = 0; _i660 < _list658.size; ++_i660) { - _elem627 = iprot.readI64(); - struct.invalidWriteIds.add(_elem627); + _elem659 = iprot.readI64(); + struct.invalidWriteIds.add(_elem659); } iprot.readListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableValidWriteIds oprot.writeFieldBegin(INVALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.invalidWriteIds.size())); - for (long _iter629 : struct.invalidWriteIds) + for (long _iter661 : struct.invalidWriteIds) { - oprot.writeI64(_iter629); + oprot.writeI64(_iter661); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds oprot.writeI64(struct.writeIdHighWaterMark); { oprot.writeI32(struct.invalidWriteIds.size()); - for (long _iter630 : struct.invalidWriteIds) + for (long _iter662 : struct.invalidWriteIds) { - oprot.writeI64(_iter630); + oprot.writeI64(_iter662); } } oprot.writeBinary(struct.abortedBits); @@ -827,13 +827,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds s struct.writeIdHighWaterMark = iprot.readI64(); struct.setWriteIdHighWaterMarkIsSet(true); { - org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.invalidWriteIds = new ArrayList(_list631.size); - long _elem632; - for (int _i633 = 0; _i633 < _list631.size; ++_i633) + org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.invalidWriteIds = new ArrayList(_list663.size); + long _elem664; + for (int _i665 = 0; _i665 < _list663.size; ++_i665) { - _elem632 = iprot.readI64(); - struct.invalidWriteIds.add(_elem632); + _elem664 = iprot.readI64(); + struct.invalidWriteIds.add(_elem664); } } struct.setInvalidWriteIdsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 7ab64eadac7948a7f5077260694926cc5b6e4e4b..8178c1bb9e45da40926019cebec7e04e76995c6b 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -470,6 +470,8 @@ public List get_runtime_stats(GetRuntimeStatsRequest rqst) throws MetaException, org.apache.thrift.TException; + public 
GetPartitionsResponse get_partitions_with_specs(GetPartitionsRequest request) throws MetaException, org.apache.thrift.TException; + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface { @@ -902,6 +904,8 @@ public void get_runtime_stats(GetRuntimeStatsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_partitions_with_specs(GetPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -7072,6 +7076,32 @@ public void send_get_runtime_stats(GetRuntimeStatsRequest rqst) throws org.apach throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_runtime_stats failed: unknown result"); } + public GetPartitionsResponse get_partitions_with_specs(GetPartitionsRequest request) throws MetaException, org.apache.thrift.TException + { + send_get_partitions_with_specs(request); + return recv_get_partitions_with_specs(); + } + + public void send_get_partitions_with_specs(GetPartitionsRequest request) throws org.apache.thrift.TException + { + get_partitions_with_specs_args args = new get_partitions_with_specs_args(); + args.setRequest(request); + sendBase("get_partitions_with_specs", args); + } + + public GetPartitionsResponse recv_get_partitions_with_specs() throws MetaException, org.apache.thrift.TException + { + get_partitions_with_specs_result result = new get_partitions_with_specs_result(); + receiveBase(result, "get_partitions_with_specs"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_with_specs failed: unknown result"); + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -14430,6 +14460,38 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } + public void get_partitions_with_specs(GetPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_partitions_with_specs_call method_call = new get_partitions_with_specs_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_with_specs_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetPartitionsRequest request; + public get_partitions_with_specs_call(GetPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_partitions_with_specs", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_partitions_with_specs_args args = new get_partitions_with_specs_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetPartitionsResponse getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_partitions_with_specs(); + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Processor extends com.facebook.fb303.FacebookService.Processor implements org.apache.thrift.TProcessor { @@ -14657,6 +14719,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public get_partitions_with_specs() { + super("get_partitions_with_specs"); + } + + public get_partitions_with_specs_args getEmptyArgsInstance() { + return new get_partitions_with_specs_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_partitions_with_specs_result getResult(I iface, get_partitions_with_specs_args args) throws org.apache.thrift.TException { + get_partitions_with_specs_result result = new get_partitions_with_specs_result(); + try { + result.success = iface.get_partitions_with_specs(args.request); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -20379,6 +20466,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public get_partitions_with_specs() { + super("get_partitions_with_specs"); + } + + public get_partitions_with_specs_args getEmptyArgsInstance() { + return new get_partitions_with_specs_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetPartitionsResponse o) { + get_partitions_with_specs_result result = new get_partitions_with_specs_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_partitions_with_specs_result result = new 
get_partitions_with_specs_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_partitions_with_specs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_partitions_with_specs(args.request,resultHandler); + } + } + } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class getMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { @@ -43344,13 +43489,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); - struct.success = new ArrayList(_list968.size); - String _elem969; - for (int _i970 = 0; _i970 < _list968.size; ++_i970) + org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); + struct.success = new ArrayList(_list1000.size); + String _elem1001; + for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) { - _elem969 = iprot.readString(); - struct.success.add(_elem969); + _elem1001 = iprot.readString(); + struct.success.add(_elem1001); } iprot.readListEnd(); } @@ -43385,9 +43530,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter971 : struct.success) + for (String _iter1003 : struct.success) { - oprot.writeString(_iter971); + oprot.writeString(_iter1003); } oprot.writeListEnd(); } @@ -43426,9 +43571,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter972 : struct.success) + for (String _iter1004 : struct.success) { - oprot.writeString(_iter972); + oprot.writeString(_iter1004); } } } @@ -43443,13 +43588,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list973.size); - String _elem974; - for (int _i975 = 0; _i975 < _list973.size; ++_i975) + org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1005.size); + String _elem1006; + for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) { - _elem974 = iprot.readString(); - struct.success.add(_elem974); + _elem1006 = iprot.readString(); + struct.success.add(_elem1006); } } struct.setSuccessIsSet(true); @@ -44103,13 +44248,13 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); - struct.success = new ArrayList(_list976.size); - String _elem977; - for (int _i978 = 0; _i978 < _list976.size; ++_i978) + org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); + struct.success = new ArrayList(_list1008.size); + String _elem1009; + for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) { - _elem977 = iprot.readString(); - struct.success.add(_elem977); + _elem1009 = iprot.readString(); + struct.success.add(_elem1009); } iprot.readListEnd(); } @@ -44144,9 +44289,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter979 : struct.success) + for (String _iter1011 : struct.success) { - oprot.writeString(_iter979); + oprot.writeString(_iter1011); } oprot.writeListEnd(); } @@ -44185,9 +44330,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter980 : struct.success) + for (String _iter1012 : struct.success) { - oprot.writeString(_iter980); + oprot.writeString(_iter1012); } } } @@ -44202,13 +44347,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list981.size); - String _elem982; - for (int _i983 = 0; _i983 < _list981.size; ++_i983) + org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1013.size); + String _elem1014; + for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) { - _elem982 = iprot.readString(); - struct.success.add(_elem982); + _elem1014 = iprot.readString(); + struct.success.add(_elem1014); } } struct.setSuccessIsSet(true); @@ -48815,16 +48960,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map984.size); - String _key985; - Type _val986; - for (int _i987 = 0; _i987 < _map984.size; ++_i987) + org.apache.thrift.protocol.TMap _map1016 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1016.size); + String _key1017; + Type _val1018; + for (int _i1019 = 0; _i1019 < _map1016.size; ++_i1019) { - _key985 = iprot.readString(); - _val986 = new Type(); - _val986.read(iprot); - struct.success.put(_key985, _val986); + _key1017 = iprot.readString(); + _val1018 = new Type(); + _val1018.read(iprot); + struct.success.put(_key1017, _val1018); } iprot.readMapEnd(); } @@ -48859,10 +49004,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter988 : struct.success.entrySet()) + 
for (Map.Entry _iter1020 : struct.success.entrySet()) { - oprot.writeString(_iter988.getKey()); - _iter988.getValue().write(oprot); + oprot.writeString(_iter1020.getKey()); + _iter1020.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -48901,10 +49046,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter989 : struct.success.entrySet()) + for (Map.Entry _iter1021 : struct.success.entrySet()) { - oprot.writeString(_iter989.getKey()); - _iter989.getValue().write(oprot); + oprot.writeString(_iter1021.getKey()); + _iter1021.getValue().write(oprot); } } } @@ -48919,16 +49064,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map990.size); - String _key991; - Type _val992; - for (int _i993 = 0; _i993 < _map990.size; ++_i993) + org.apache.thrift.protocol.TMap _map1022 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1022.size); + String _key1023; + Type _val1024; + for (int _i1025 = 0; _i1025 < _map1022.size; ++_i1025) { - _key991 = iprot.readString(); - _val992 = new Type(); - _val992.read(iprot); - struct.success.put(_key991, _val992); + _key1023 = iprot.readString(); + _val1024 = new Type(); + _val1024.read(iprot); + struct.success.put(_key1023, _val1024); } } struct.setSuccessIsSet(true); @@ -49963,14 +50108,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list994 = iprot.readListBegin(); - struct.success = new ArrayList(_list994.size); - FieldSchema _elem995; - for (int _i996 = 0; _i996 < _list994.size; ++_i996) + org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); + struct.success = new ArrayList(_list1026.size); + FieldSchema _elem1027; + for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) { - _elem995 = new FieldSchema(); - _elem995.read(iprot); - struct.success.add(_elem995); + _elem1027 = new FieldSchema(); + _elem1027.read(iprot); + struct.success.add(_elem1027); } iprot.readListEnd(); } @@ -50023,9 +50168,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter997 : struct.success) + for (FieldSchema _iter1029 : struct.success) { - _iter997.write(oprot); + _iter1029.write(oprot); } oprot.writeListEnd(); } @@ -50080,9 +50225,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter998 : struct.success) + for (FieldSchema _iter1030 : struct.success) { - _iter998.write(oprot); + _iter1030.write(oprot); } } } @@ -50103,14 +50248,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList 
_list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list999.size); - FieldSchema _elem1000; - for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) + org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1031.size); + FieldSchema _elem1032; + for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) { - _elem1000 = new FieldSchema(); - _elem1000.read(iprot); - struct.success.add(_elem1000); + _elem1032 = new FieldSchema(); + _elem1032.read(iprot); + struct.success.add(_elem1032); } } struct.setSuccessIsSet(true); @@ -51264,14 +51409,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin(); - struct.success = new ArrayList(_list1002.size); - FieldSchema _elem1003; - for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) + org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); + struct.success = new ArrayList(_list1034.size); + FieldSchema _elem1035; + for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) { - _elem1003 = new FieldSchema(); - _elem1003.read(iprot); - struct.success.add(_elem1003); + _elem1035 = new FieldSchema(); + _elem1035.read(iprot); + struct.success.add(_elem1035); } iprot.readListEnd(); } @@ -51324,9 +51469,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1005 : struct.success) + for (FieldSchema _iter1037 : struct.success) { - _iter1005.write(oprot); + _iter1037.write(oprot); } oprot.writeListEnd(); } @@ -51381,9 +51526,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1006 : struct.success) + for (FieldSchema _iter1038 : struct.success) { - _iter1006.write(oprot); + _iter1038.write(oprot); } } } @@ -51404,14 +51549,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1007.size); - FieldSchema _elem1008; - for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) + org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1039.size); + FieldSchema _elem1040; + for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) { - _elem1008 = new FieldSchema(); - _elem1008.read(iprot); - struct.success.add(_elem1008); + _elem1040 = new FieldSchema(); + _elem1040.read(iprot); + struct.success.add(_elem1040); } } struct.setSuccessIsSet(true); @@ -52456,14 +52601,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); - struct.success = new 
ArrayList(_list1010.size); - FieldSchema _elem1011; - for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) + org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); + struct.success = new ArrayList(_list1042.size); + FieldSchema _elem1043; + for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) { - _elem1011 = new FieldSchema(); - _elem1011.read(iprot); - struct.success.add(_elem1011); + _elem1043 = new FieldSchema(); + _elem1043.read(iprot); + struct.success.add(_elem1043); } iprot.readListEnd(); } @@ -52516,9 +52661,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1013 : struct.success) + for (FieldSchema _iter1045 : struct.success) { - _iter1013.write(oprot); + _iter1045.write(oprot); } oprot.writeListEnd(); } @@ -52573,9 +52718,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1014 : struct.success) + for (FieldSchema _iter1046 : struct.success) { - _iter1014.write(oprot); + _iter1046.write(oprot); } } } @@ -52596,14 +52741,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1015.size); - FieldSchema _elem1016; - for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) + org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1047.size); + FieldSchema _elem1048; + for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) { - _elem1016 = new FieldSchema(); - _elem1016.read(iprot); - struct.success.add(_elem1016); + _elem1048 = new FieldSchema(); + _elem1048.read(iprot); + struct.success.add(_elem1048); } } struct.setSuccessIsSet(true); @@ -53757,14 +53902,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); - struct.success = new ArrayList(_list1018.size); - FieldSchema _elem1019; - for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) + org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); + struct.success = new ArrayList(_list1050.size); + FieldSchema _elem1051; + for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) { - _elem1019 = new FieldSchema(); - _elem1019.read(iprot); - struct.success.add(_elem1019); + _elem1051 = new FieldSchema(); + _elem1051.read(iprot); + struct.success.add(_elem1051); } iprot.readListEnd(); } @@ -53817,9 +53962,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1021 : struct.success) + for (FieldSchema _iter1053 : struct.success) { - _iter1021.write(oprot); + _iter1053.write(oprot); } oprot.writeListEnd(); } @@ -53874,9 +54019,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1022 : struct.success) + for (FieldSchema _iter1054 : struct.success) { - _iter1022.write(oprot); + _iter1054.write(oprot); } } } @@ -53897,14 +54042,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1023.size); - FieldSchema _elem1024; - for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) + org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1055.size); + FieldSchema _elem1056; + for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) { - _elem1024 = new FieldSchema(); - _elem1024.read(iprot); - struct.success.add(_elem1024); + _elem1056 = new FieldSchema(); + _elem1056.read(iprot); + struct.success.add(_elem1056); } } struct.setSuccessIsSet(true); @@ -57033,14 +57178,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list1026.size); - SQLPrimaryKey _elem1027; - for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) + org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list1058.size); + SQLPrimaryKey _elem1059; + for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) { - _elem1027 = new SQLPrimaryKey(); - _elem1027.read(iprot); - struct.primaryKeys.add(_elem1027); + _elem1059 = new SQLPrimaryKey(); + _elem1059.read(iprot); + struct.primaryKeys.add(_elem1059); } iprot.readListEnd(); } @@ -57052,14 +57197,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1029 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list1029.size); - SQLForeignKey _elem1030; - for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) + org.apache.thrift.protocol.TList _list1061 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list1061.size); + SQLForeignKey _elem1062; + for (int _i1063 = 0; _i1063 < _list1061.size; ++_i1063) { - _elem1030 = new SQLForeignKey(); - _elem1030.read(iprot); - struct.foreignKeys.add(_elem1030); + _elem1062 = new SQLForeignKey(); + _elem1062.read(iprot); + struct.foreignKeys.add(_elem1062); } iprot.readListEnd(); } @@ -57071,14 +57216,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list1032.size); - SQLUniqueConstraint _elem1033; - for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) + org.apache.thrift.protocol.TList _list1064 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list1064.size); + SQLUniqueConstraint _elem1065; + for (int _i1066 = 0; _i1066 < _list1064.size; ++_i1066) 
{ - _elem1033 = new SQLUniqueConstraint(); - _elem1033.read(iprot); - struct.uniqueConstraints.add(_elem1033); + _elem1065 = new SQLUniqueConstraint(); + _elem1065.read(iprot); + struct.uniqueConstraints.add(_elem1065); } iprot.readListEnd(); } @@ -57090,14 +57235,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1035 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list1035.size); - SQLNotNullConstraint _elem1036; - for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037) + org.apache.thrift.protocol.TList _list1067 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list1067.size); + SQLNotNullConstraint _elem1068; + for (int _i1069 = 0; _i1069 < _list1067.size; ++_i1069) { - _elem1036 = new SQLNotNullConstraint(); - _elem1036.read(iprot); - struct.notNullConstraints.add(_elem1036); + _elem1068 = new SQLNotNullConstraint(); + _elem1068.read(iprot); + struct.notNullConstraints.add(_elem1068); } iprot.readListEnd(); } @@ -57109,14 +57254,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list1038.size); - SQLDefaultConstraint _elem1039; - for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040) + org.apache.thrift.protocol.TList _list1070 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list1070.size); + SQLDefaultConstraint _elem1071; + for (int _i1072 = 0; _i1072 < _list1070.size; ++_i1072) { - _elem1039 = new SQLDefaultConstraint(); - _elem1039.read(iprot); - struct.defaultConstraints.add(_elem1039); + _elem1071 = new SQLDefaultConstraint(); + _elem1071.read(iprot); + struct.defaultConstraints.add(_elem1071); } iprot.readListEnd(); } @@ -57128,14 +57273,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1041 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list1041.size); - SQLCheckConstraint _elem1042; - for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) + org.apache.thrift.protocol.TList _list1073 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list1073.size); + SQLCheckConstraint _elem1074; + for (int _i1075 = 0; _i1075 < _list1073.size; ++_i1075) { - _elem1042 = new SQLCheckConstraint(); - _elem1042.read(iprot); - struct.checkConstraints.add(_elem1042); + _elem1074 = new SQLCheckConstraint(); + _elem1074.read(iprot); + struct.checkConstraints.add(_elem1074); } iprot.readListEnd(); } @@ -57166,9 +57311,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1044 : struct.primaryKeys) + for (SQLPrimaryKey _iter1076 : struct.primaryKeys) { - _iter1044.write(oprot); + _iter1076.write(oprot); } oprot.writeListEnd(); } @@ -57178,9 +57323,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1045 : struct.foreignKeys) + for (SQLForeignKey _iter1077 : struct.foreignKeys) { - _iter1045.write(oprot); + _iter1077.write(oprot); } oprot.writeListEnd(); } @@ -57190,9 +57335,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1046 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1078 : struct.uniqueConstraints) { - _iter1046.write(oprot); + _iter1078.write(oprot); } oprot.writeListEnd(); } @@ -57202,9 +57347,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1047 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1079 : struct.notNullConstraints) { - _iter1047.write(oprot); + _iter1079.write(oprot); } oprot.writeListEnd(); } @@ -57214,9 +57359,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1048 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1080 : struct.defaultConstraints) { - _iter1048.write(oprot); + _iter1080.write(oprot); } oprot.writeListEnd(); } @@ -57226,9 +57371,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1049 : struct.checkConstraints) + for (SQLCheckConstraint _iter1081 : struct.checkConstraints) { - _iter1049.write(oprot); + _iter1081.write(oprot); } oprot.writeListEnd(); } @@ -57280,54 +57425,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1050 : struct.primaryKeys) + for (SQLPrimaryKey _iter1082 : struct.primaryKeys) { - _iter1050.write(oprot); + _iter1082.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1051 : struct.foreignKeys) + for (SQLForeignKey _iter1083 : struct.foreignKeys) { - _iter1051.write(oprot); + _iter1083.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1052 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1084 : struct.uniqueConstraints) { - _iter1052.write(oprot); + _iter1084.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1053 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1085 : struct.notNullConstraints) { - _iter1053.write(oprot); + _iter1085.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { 
oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1054 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1086 : struct.defaultConstraints) { - _iter1054.write(oprot); + _iter1086.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1055 : struct.checkConstraints) + for (SQLCheckConstraint _iter1087 : struct.checkConstraints) { - _iter1055.write(oprot); + _iter1087.write(oprot); } } } @@ -57344,84 +57489,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1056 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list1056.size); - SQLPrimaryKey _elem1057; - for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058) + org.apache.thrift.protocol.TList _list1088 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list1088.size); + SQLPrimaryKey _elem1089; + for (int _i1090 = 0; _i1090 < _list1088.size; ++_i1090) { - _elem1057 = new SQLPrimaryKey(); - _elem1057.read(iprot); - struct.primaryKeys.add(_elem1057); + _elem1089 = new SQLPrimaryKey(); + _elem1089.read(iprot); + struct.primaryKeys.add(_elem1089); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list1059.size); - SQLForeignKey _elem1060; - for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061) + org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list1091.size); + SQLForeignKey _elem1092; + for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) { - _elem1060 = new SQLForeignKey(); - _elem1060.read(iprot); - struct.foreignKeys.add(_elem1060); + _elem1092 = new SQLForeignKey(); + _elem1092.read(iprot); + struct.foreignKeys.add(_elem1092); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1062 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list1062.size); - SQLUniqueConstraint _elem1063; - for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064) + org.apache.thrift.protocol.TList _list1094 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list1094.size); + SQLUniqueConstraint _elem1095; + for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) { - _elem1063 = new SQLUniqueConstraint(); - _elem1063.read(iprot); - struct.uniqueConstraints.add(_elem1063); + _elem1095 = new SQLUniqueConstraint(); + _elem1095.read(iprot); + struct.uniqueConstraints.add(_elem1095); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list1065.size); - SQLNotNullConstraint _elem1066; - for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) + org.apache.thrift.protocol.TList _list1097 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1097.size); + SQLNotNullConstraint _elem1098; + for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) { - _elem1066 = new SQLNotNullConstraint(); - _elem1066.read(iprot); - struct.notNullConstraints.add(_elem1066); + _elem1098 = new SQLNotNullConstraint(); + _elem1098.read(iprot); + struct.notNullConstraints.add(_elem1098); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1068 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list1068.size); - SQLDefaultConstraint _elem1069; - for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) + org.apache.thrift.protocol.TList _list1100 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1100.size); + SQLDefaultConstraint _elem1101; + for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) { - _elem1069 = new SQLDefaultConstraint(); - _elem1069.read(iprot); - struct.defaultConstraints.add(_elem1069); + _elem1101 = new SQLDefaultConstraint(); + _elem1101.read(iprot); + struct.defaultConstraints.add(_elem1101); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list1071.size); - SQLCheckConstraint _elem1072; - for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) + org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1103.size); + SQLCheckConstraint _elem1104; + for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) { - _elem1072 = new SQLCheckConstraint(); - _elem1072.read(iprot); - struct.checkConstraints.add(_elem1072); + _elem1104 = new SQLCheckConstraint(); + _elem1104.read(iprot); + struct.checkConstraints.add(_elem1104); } } struct.setCheckConstraintsIsSet(true); @@ -66571,13 +66716,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1074.size); - String _elem1075; - for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) + org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1106.size); + String _elem1107; + for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) { - _elem1075 = iprot.readString(); - struct.partNames.add(_elem1075); + _elem1107 = iprot.readString(); + struct.partNames.add(_elem1107); } iprot.readListEnd(); } @@ -66613,9 +66758,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1077 : struct.partNames) + for (String _iter1109 : struct.partNames) { - oprot.writeString(_iter1077); + oprot.writeString(_iter1109); } oprot.writeListEnd(); } @@ -66658,9 +66803,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1078 : struct.partNames) + for (String _iter1110 : struct.partNames) { - oprot.writeString(_iter1078); + oprot.writeString(_iter1110); } } } @@ -66680,13 +66825,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1079.size); - String _elem1080; - for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081) + org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1111.size); + String _elem1112; + for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) { - _elem1080 = iprot.readString(); - struct.partNames.add(_elem1080); + _elem1112 = iprot.readString(); + struct.partNames.add(_elem1112); } } struct.setPartNamesIsSet(true); @@ -68743,13 +68888,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin(); - struct.success = new ArrayList(_list1082.size); - String _elem1083; - for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) + org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); + struct.success = new ArrayList(_list1114.size); + String _elem1115; + for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) { - _elem1083 = iprot.readString(); - struct.success.add(_elem1083); + _elem1115 = iprot.readString(); + struct.success.add(_elem1115); } iprot.readListEnd(); } @@ -68784,9 +68929,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1085 : struct.success) + for (String _iter1117 : struct.success) { - oprot.writeString(_iter1085); + oprot.writeString(_iter1117); } oprot.writeListEnd(); } @@ -68825,9 +68970,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1086 : struct.success) + for (String _iter1118 : struct.success) { - oprot.writeString(_iter1086); + oprot.writeString(_iter1118); } } } @@ -68842,13 +68987,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1087.size); - String _elem1088; - for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) + org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1119.size); + String _elem1120; + for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) { - _elem1088 = iprot.readString(); - struct.success.add(_elem1088); + _elem1120 = iprot.readString(); + struct.success.add(_elem1120); } } 
struct.setSuccessIsSet(true); @@ -69822,13 +69967,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin(); - struct.success = new ArrayList(_list1090.size); - String _elem1091; - for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092) + org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); + struct.success = new ArrayList(_list1122.size); + String _elem1123; + for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) { - _elem1091 = iprot.readString(); - struct.success.add(_elem1091); + _elem1123 = iprot.readString(); + struct.success.add(_elem1123); } iprot.readListEnd(); } @@ -69863,9 +70008,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1093 : struct.success) + for (String _iter1125 : struct.success) { - oprot.writeString(_iter1093); + oprot.writeString(_iter1125); } oprot.writeListEnd(); } @@ -69904,9 +70049,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1094 : struct.success) + for (String _iter1126 : struct.success) { - oprot.writeString(_iter1094); + oprot.writeString(_iter1126); } } } @@ -69921,13 +70066,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1095.size); - String _elem1096; - for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097) + org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1127.size); + String _elem1128; + for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) { - _elem1096 = iprot.readString(); - struct.success.add(_elem1096); + _elem1128 = iprot.readString(); + struct.success.add(_elem1128); } } struct.setSuccessIsSet(true); @@ -70693,13 +70838,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin(); - struct.success = new ArrayList(_list1098.size); - String _elem1099; - for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100) + org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); + struct.success = new ArrayList(_list1130.size); + String _elem1131; + for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) { - _elem1099 = iprot.readString(); - struct.success.add(_elem1099); + _elem1131 = iprot.readString(); + struct.success.add(_elem1131); } iprot.readListEnd(); } @@ -70734,9 +70879,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1101 : struct.success) + for (String _iter1133 : struct.success) { 
- oprot.writeString(_iter1101); + oprot.writeString(_iter1133); } oprot.writeListEnd(); } @@ -70775,9 +70920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1102 : struct.success) + for (String _iter1134 : struct.success) { - oprot.writeString(_iter1102); + oprot.writeString(_iter1134); } } } @@ -70792,13 +70937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1103.size); - String _elem1104; - for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105) + org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1135.size); + String _elem1136; + for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137) { - _elem1104 = iprot.readString(); - struct.success.add(_elem1104); + _elem1136 = iprot.readString(); + struct.success.add(_elem1136); } } struct.setSuccessIsSet(true); @@ -71303,13 +71448,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1106.size); - String _elem1107; - for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108) + org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1138.size); + String _elem1139; + for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140) { - _elem1107 = iprot.readString(); - struct.tbl_types.add(_elem1107); + _elem1139 = iprot.readString(); + struct.tbl_types.add(_elem1139); } iprot.readListEnd(); } @@ -71345,9 +71490,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1109 : struct.tbl_types) + for (String _iter1141 : struct.tbl_types) { - oprot.writeString(_iter1109); + oprot.writeString(_iter1141); } oprot.writeListEnd(); } @@ -71390,9 +71535,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1110 : struct.tbl_types) + for (String _iter1142 : struct.tbl_types) { - oprot.writeString(_iter1110); + oprot.writeString(_iter1142); } } } @@ -71412,13 +71557,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1111.size); - String _elem1112; - for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113) + org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1143.size); + String _elem1144; + for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) { - _elem1112 
= iprot.readString(); - struct.tbl_types.add(_elem1112); + _elem1144 = iprot.readString(); + struct.tbl_types.add(_elem1144); } } struct.setTbl_typesIsSet(true); @@ -71824,14 +71969,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin(); - struct.success = new ArrayList(_list1114.size); - TableMeta _elem1115; - for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116) + org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); + struct.success = new ArrayList(_list1146.size); + TableMeta _elem1147; + for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) { - _elem1115 = new TableMeta(); - _elem1115.read(iprot); - struct.success.add(_elem1115); + _elem1147 = new TableMeta(); + _elem1147.read(iprot); + struct.success.add(_elem1147); } iprot.readListEnd(); } @@ -71866,9 +72011,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1117 : struct.success) + for (TableMeta _iter1149 : struct.success) { - _iter1117.write(oprot); + _iter1149.write(oprot); } oprot.writeListEnd(); } @@ -71907,9 +72052,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1118 : struct.success) + for (TableMeta _iter1150 : struct.success) { - _iter1118.write(oprot); + _iter1150.write(oprot); } } } @@ -71924,14 +72069,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1119.size); - TableMeta _elem1120; - for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121) + org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1151.size); + TableMeta _elem1152; + for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) { - _elem1120 = new TableMeta(); - _elem1120.read(iprot); - struct.success.add(_elem1120); + _elem1152 = new TableMeta(); + _elem1152.read(iprot); + struct.success.add(_elem1152); } } struct.setSuccessIsSet(true); @@ -72697,13 +72842,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); - struct.success = new ArrayList(_list1122.size); - String _elem1123; - for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) + org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); + struct.success = new ArrayList(_list1154.size); + String _elem1155; + for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) { - _elem1123 = iprot.readString(); - struct.success.add(_elem1123); + _elem1155 = iprot.readString(); + struct.success.add(_elem1155); } iprot.readListEnd(); } @@ -72738,9 +72883,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1125 : struct.success) + for (String _iter1157 : struct.success) { - oprot.writeString(_iter1125); + oprot.writeString(_iter1157); } oprot.writeListEnd(); } @@ -72779,9 +72924,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1126 : struct.success) + for (String _iter1158 : struct.success) { - oprot.writeString(_iter1126); + oprot.writeString(_iter1158); } } } @@ -72796,13 +72941,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1127.size); - String _elem1128; - for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) + org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1159.size); + String _elem1160; + for (int _i1161 = 0; _i1161 < _list1159.size; ++_i1161) { - _elem1128 = iprot.readString(); - struct.success.add(_elem1128); + _elem1160 = iprot.readString(); + struct.success.add(_elem1160); } } struct.setSuccessIsSet(true); @@ -74255,13 +74400,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1130.size); - String _elem1131; - for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) + org.apache.thrift.protocol.TList _list1162 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1162.size); + String _elem1163; + for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) { - _elem1131 = iprot.readString(); - struct.tbl_names.add(_elem1131); + _elem1163 = iprot.readString(); + struct.tbl_names.add(_elem1163); } iprot.readListEnd(); } @@ -74292,9 +74437,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1133 : struct.tbl_names) + for (String _iter1165 : struct.tbl_names) { - oprot.writeString(_iter1133); + oprot.writeString(_iter1165); } oprot.writeListEnd(); } @@ -74331,9 +74476,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1134 : struct.tbl_names) + for (String _iter1166 : struct.tbl_names) { - oprot.writeString(_iter1134); + oprot.writeString(_iter1166); } } } @@ -74349,13 +74494,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1135 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1135.size); - String _elem1136; - for (int _i1137 = 0; _i1137 < _list1135.size; ++_i1137) + 
org.apache.thrift.protocol.TList _list1167 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1167.size); + String _elem1168; + for (int _i1169 = 0; _i1169 < _list1167.size; ++_i1169) { - _elem1136 = iprot.readString(); - struct.tbl_names.add(_elem1136); + _elem1168 = iprot.readString(); + struct.tbl_names.add(_elem1168); } } struct.setTbl_namesIsSet(true); @@ -74680,14 +74825,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1138 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list1138.size); - Table _elem1139; - for (int _i1140 = 0; _i1140 < _list1138.size; ++_i1140) + org.apache.thrift.protocol.TList _list1170 = iprot.readListBegin(); + struct.success = new ArrayList<Table>
(_list1170.size); + Table _elem1171; + for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) { - _elem1139 = new Table(); - _elem1139.read(iprot); - struct.success.add(_elem1139); + _elem1171 = new Table(); + _elem1171.read(iprot); + struct.success.add(_elem1171); } iprot.readListEnd(); } @@ -74713,9 +74858,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1141 : struct.success) + for (Table _iter1173 : struct.success) { - _iter1141.write(oprot); + _iter1173.write(oprot); } oprot.writeListEnd(); } @@ -74746,9 +74891,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1142 : struct.success) + for (Table _iter1174 : struct.success) { - _iter1142.write(oprot); + _iter1174.write(oprot); } } } @@ -74760,14 +74905,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
<Table>(_list1143.size); - Table _elem1144; - for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) + org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list1175.size); + Table _elem1176; + for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) { - _elem1144 = new Table(); - _elem1144.read(iprot); - struct.success.add(_elem1144); + _elem1176 = new Table(); + _elem1176.read(iprot); + struct.success.add(_elem1176); } } struct.setSuccessIsSet(true); @@ -80275,13 +80420,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); - struct.success = new ArrayList(_list1146.size); - String _elem1147; - for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) + org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); + struct.success = new ArrayList(_list1178.size); + String _elem1179; + for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) { - _elem1147 = iprot.readString(); - struct.success.add(_elem1147); + _elem1179 = iprot.readString(); + struct.success.add(_elem1179); } iprot.readListEnd(); } @@ -80334,9 +80479,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1149 : struct.success) + for (String _iter1181 : struct.success) { - oprot.writeString(_iter1149); + oprot.writeString(_iter1181); } oprot.writeListEnd(); } @@ -80391,9 +80536,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1150 : struct.success) + for (String _iter1182 : struct.success) { - oprot.writeString(_iter1150); + oprot.writeString(_iter1182); } } } @@ -80414,13 +80559,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1151.size); - String _elem1152; - for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) + org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1183.size); + String _elem1184; + for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) { - _elem1152 = iprot.readString(); - struct.success.add(_elem1152); + _elem1184 = iprot.readString(); + struct.success.add(_elem1184); } } struct.setSuccessIsSet(true); @@ -87217,14 +87362,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1154.size); - Partition _elem1155; - for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) + org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1186.size); + Partition _elem1187; + for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) { - _elem1155 = new Partition(); - _elem1155.read(iprot); - struct.new_parts.add(_elem1155); + _elem1187 = new Partition(); + _elem1187.read(iprot); + struct.new_parts.add(_elem1187); } iprot.readListEnd(); } @@ -87250,9 +87395,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1157 : struct.new_parts) + for (Partition _iter1189 : struct.new_parts) { - _iter1157.write(oprot); + _iter1189.write(oprot); } oprot.writeListEnd(); } @@ -87283,9 +87428,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1158 : struct.new_parts) + for (Partition _iter1190 : struct.new_parts) { - _iter1158.write(oprot); + _iter1190.write(oprot); } } } @@ -87297,14 +87442,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1159.size); - Partition _elem1160; - for (int _i1161 = 0; _i1161 < _list1159.size; ++_i1161) + org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1191.size); + Partition _elem1192; + for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) { - _elem1160 = new Partition(); - _elem1160.read(iprot); - struct.new_parts.add(_elem1160); + _elem1192 = new Partition(); + _elem1192.read(iprot); + struct.new_parts.add(_elem1192); } } struct.setNew_partsIsSet(true); @@ -88305,14 +88450,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1162 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1162.size); - PartitionSpec _elem1163; - for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) + org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1194.size); + PartitionSpec _elem1195; + for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) { - _elem1163 = new PartitionSpec(); - _elem1163.read(iprot); - struct.new_parts.add(_elem1163); + _elem1195 = new PartitionSpec(); + _elem1195.read(iprot); + struct.new_parts.add(_elem1195); } iprot.readListEnd(); } @@ -88338,9 +88483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1165 : struct.new_parts) + for (PartitionSpec _iter1197 : struct.new_parts) { - _iter1165.write(oprot); + _iter1197.write(oprot); } oprot.writeListEnd(); } @@ -88371,9 +88516,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1166 : struct.new_parts) + for (PartitionSpec _iter1198 : struct.new_parts) { - _iter1166.write(oprot); + _iter1198.write(oprot); } } } @@ -88385,14 +88530,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1167 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1167.size); - PartitionSpec _elem1168; - for (int _i1169 = 0; _i1169 < _list1167.size; ++_i1169) + org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1199.size); + PartitionSpec _elem1200; + for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) { - _elem1168 = new PartitionSpec(); - _elem1168.read(iprot); - struct.new_parts.add(_elem1168); + _elem1200 = new PartitionSpec(); + _elem1200.read(iprot); + struct.new_parts.add(_elem1200); } } struct.setNew_partsIsSet(true); @@ -89568,13 +89713,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1170 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1170.size); - String _elem1171; - for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) + org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1202.size); + String _elem1203; + for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) { - _elem1171 = iprot.readString(); - struct.part_vals.add(_elem1171); + _elem1203 = iprot.readString(); + struct.part_vals.add(_elem1203); } iprot.readListEnd(); } @@ -89610,9 +89755,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1173 : struct.part_vals) + for (String _iter1205 : struct.part_vals) { - oprot.writeString(_iter1173); + oprot.writeString(_iter1205); } oprot.writeListEnd(); } @@ -89655,9 +89800,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1174 : struct.part_vals) + for (String _iter1206 : struct.part_vals) { - oprot.writeString(_iter1174); + oprot.writeString(_iter1206); } } } @@ -89677,13 +89822,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1175.size); - String _elem1176; - for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) + org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1207.size); + String _elem1208; + for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) { - _elem1176 = iprot.readString(); - struct.part_vals.add(_elem1176); + _elem1208 = iprot.readString(); + struct.part_vals.add(_elem1208); } } struct.setPart_valsIsSet(true); @@ -91992,13 +92137,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1178.size); - String _elem1179; - for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) + 
org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1210.size); + String _elem1211; + for (int _i1212 = 0; _i1212 < _list1210.size; ++_i1212) { - _elem1179 = iprot.readString(); - struct.part_vals.add(_elem1179); + _elem1211 = iprot.readString(); + struct.part_vals.add(_elem1211); } iprot.readListEnd(); } @@ -92043,9 +92188,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1181 : struct.part_vals) + for (String _iter1213 : struct.part_vals) { - oprot.writeString(_iter1181); + oprot.writeString(_iter1213); } oprot.writeListEnd(); } @@ -92096,9 +92241,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1182 : struct.part_vals) + for (String _iter1214 : struct.part_vals) { - oprot.writeString(_iter1182); + oprot.writeString(_iter1214); } } } @@ -92121,13 +92266,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1183.size); - String _elem1184; - for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) + org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1215.size); + String _elem1216; + for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) { - _elem1184 = iprot.readString(); - struct.part_vals.add(_elem1184); + _elem1216 = iprot.readString(); + struct.part_vals.add(_elem1216); } } struct.setPart_valsIsSet(true); @@ -95997,13 +96142,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1186.size); - String _elem1187; - for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) + org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1218.size); + String _elem1219; + for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) { - _elem1187 = iprot.readString(); - struct.part_vals.add(_elem1187); + _elem1219 = iprot.readString(); + struct.part_vals.add(_elem1219); } iprot.readListEnd(); } @@ -96047,9 +96192,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1189 : struct.part_vals) + for (String _iter1221 : struct.part_vals) { - oprot.writeString(_iter1189); + oprot.writeString(_iter1221); } oprot.writeListEnd(); } @@ -96098,9 +96243,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1190 : struct.part_vals) + for (String _iter1222 : struct.part_vals) { - oprot.writeString(_iter1190); + 
oprot.writeString(_iter1222); } } } @@ -96123,13 +96268,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1191.size); - String _elem1192; - for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) + org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1223.size); + String _elem1224; + for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) { - _elem1192 = iprot.readString(); - struct.part_vals.add(_elem1192); + _elem1224 = iprot.readString(); + struct.part_vals.add(_elem1224); } } struct.setPart_valsIsSet(true); @@ -97368,13 +97513,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1194.size); - String _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1226.size); + String _elem1227; + for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) { - _elem1195 = iprot.readString(); - struct.part_vals.add(_elem1195); + _elem1227 = iprot.readString(); + struct.part_vals.add(_elem1227); } iprot.readListEnd(); } @@ -97427,9 +97572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1197 : struct.part_vals) + for (String _iter1229 : struct.part_vals) { - oprot.writeString(_iter1197); + oprot.writeString(_iter1229); } oprot.writeListEnd(); } @@ -97486,9 +97631,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1198 : struct.part_vals) + for (String _iter1230 : struct.part_vals) { - oprot.writeString(_iter1198); + oprot.writeString(_iter1230); } } } @@ -97514,13 +97659,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1199.size); - String _elem1200; - for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) + org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1231.size); + String _elem1232; + for (int _i1233 = 0; _i1233 < _list1231.size; ++_i1233) { - _elem1200 = iprot.readString(); - struct.part_vals.add(_elem1200); + _elem1232 = iprot.readString(); + struct.part_vals.add(_elem1232); } } struct.setPart_valsIsSet(true); @@ -102122,13 +102267,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1202 = 
iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1202.size); - String _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1234.size); + String _elem1235; + for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) { - _elem1203 = iprot.readString(); - struct.part_vals.add(_elem1203); + _elem1235 = iprot.readString(); + struct.part_vals.add(_elem1235); } iprot.readListEnd(); } @@ -102164,9 +102309,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1205 : struct.part_vals) + for (String _iter1237 : struct.part_vals) { - oprot.writeString(_iter1205); + oprot.writeString(_iter1237); } oprot.writeListEnd(); } @@ -102209,9 +102354,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1206 : struct.part_vals) + for (String _iter1238 : struct.part_vals) { - oprot.writeString(_iter1206); + oprot.writeString(_iter1238); } } } @@ -102231,13 +102376,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1207.size); - String _elem1208; - for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) + org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1239.size); + String _elem1240; + for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) { - _elem1208 = iprot.readString(); - struct.part_vals.add(_elem1208); + _elem1240 = iprot.readString(); + struct.part_vals.add(_elem1240); } } struct.setPart_valsIsSet(true); @@ -103455,15 +103600,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1210 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1210.size); - String _key1211; - String _val1212; - for (int _i1213 = 0; _i1213 < _map1210.size; ++_i1213) + org.apache.thrift.protocol.TMap _map1242 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1242.size); + String _key1243; + String _val1244; + for (int _i1245 = 0; _i1245 < _map1242.size; ++_i1245) { - _key1211 = iprot.readString(); - _val1212 = iprot.readString(); - struct.partitionSpecs.put(_key1211, _val1212); + _key1243 = iprot.readString(); + _val1244 = iprot.readString(); + struct.partitionSpecs.put(_key1243, _val1244); } iprot.readMapEnd(); } @@ -103521,10 +103666,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1214 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1246 : struct.partitionSpecs.entrySet()) { - 
oprot.writeString(_iter1214.getKey()); - oprot.writeString(_iter1214.getValue()); + oprot.writeString(_iter1246.getKey()); + oprot.writeString(_iter1246.getValue()); } oprot.writeMapEnd(); } @@ -103587,10 +103732,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1215 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1247 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1215.getKey()); - oprot.writeString(_iter1215.getValue()); + oprot.writeString(_iter1247.getKey()); + oprot.writeString(_iter1247.getValue()); } } } @@ -103614,15 +103759,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1216 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1216.size); - String _key1217; - String _val1218; - for (int _i1219 = 0; _i1219 < _map1216.size; ++_i1219) + org.apache.thrift.protocol.TMap _map1248 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1248.size); + String _key1249; + String _val1250; + for (int _i1251 = 0; _i1251 < _map1248.size; ++_i1251) { - _key1217 = iprot.readString(); - _val1218 = iprot.readString(); - struct.partitionSpecs.put(_key1217, _val1218); + _key1249 = iprot.readString(); + _val1250 = iprot.readString(); + struct.partitionSpecs.put(_key1249, _val1250); } } struct.setPartitionSpecsIsSet(true); @@ -105068,15 +105213,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1220 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1220.size); - String _key1221; - String _val1222; - for (int _i1223 = 0; _i1223 < _map1220.size; ++_i1223) + org.apache.thrift.protocol.TMap _map1252 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1252.size); + String _key1253; + String _val1254; + for (int _i1255 = 0; _i1255 < _map1252.size; ++_i1255) { - _key1221 = iprot.readString(); - _val1222 = iprot.readString(); - struct.partitionSpecs.put(_key1221, _val1222); + _key1253 = iprot.readString(); + _val1254 = iprot.readString(); + struct.partitionSpecs.put(_key1253, _val1254); } iprot.readMapEnd(); } @@ -105134,10 +105279,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1224 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1256 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1224.getKey()); - oprot.writeString(_iter1224.getValue()); + oprot.writeString(_iter1256.getKey()); + oprot.writeString(_iter1256.getValue()); } oprot.writeMapEnd(); } @@ -105200,10 +105345,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry 
_iter1225 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1257 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1225.getKey()); - oprot.writeString(_iter1225.getValue()); + oprot.writeString(_iter1257.getKey()); + oprot.writeString(_iter1257.getValue()); } } } @@ -105227,15 +105372,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1226 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1226.size); - String _key1227; - String _val1228; - for (int _i1229 = 0; _i1229 < _map1226.size; ++_i1229) + org.apache.thrift.protocol.TMap _map1258 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1258.size); + String _key1259; + String _val1260; + for (int _i1261 = 0; _i1261 < _map1258.size; ++_i1261) { - _key1227 = iprot.readString(); - _val1228 = iprot.readString(); - struct.partitionSpecs.put(_key1227, _val1228); + _key1259 = iprot.readString(); + _val1260 = iprot.readString(); + struct.partitionSpecs.put(_key1259, _val1260); } } struct.setPartitionSpecsIsSet(true); @@ -105900,14 +106045,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1230 = iprot.readListBegin(); - struct.success = new ArrayList(_list1230.size); - Partition _elem1231; - for (int _i1232 = 0; _i1232 < _list1230.size; ++_i1232) + org.apache.thrift.protocol.TList _list1262 = iprot.readListBegin(); + struct.success = new ArrayList(_list1262.size); + Partition _elem1263; + for (int _i1264 = 0; _i1264 < _list1262.size; ++_i1264) { - _elem1231 = new Partition(); - _elem1231.read(iprot); - struct.success.add(_elem1231); + _elem1263 = new Partition(); + _elem1263.read(iprot); + struct.success.add(_elem1263); } iprot.readListEnd(); } @@ -105969,9 +106114,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1233 : struct.success) + for (Partition _iter1265 : struct.success) { - _iter1233.write(oprot); + _iter1265.write(oprot); } oprot.writeListEnd(); } @@ -106034,9 +106179,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1234 : struct.success) + for (Partition _iter1266 : struct.success) { - _iter1234.write(oprot); + _iter1266.write(oprot); } } } @@ -106060,14 +106205,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1235 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1235.size); - Partition _elem1236; - for (int _i1237 = 0; _i1237 < _list1235.size; ++_i1237) + org.apache.thrift.protocol.TList _list1267 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.success = new ArrayList(_list1267.size); + Partition _elem1268; + for (int _i1269 = 0; _i1269 < _list1267.size; ++_i1269) { - _elem1236 = new Partition(); - _elem1236.read(iprot); - struct.success.add(_elem1236); + _elem1268 = new Partition(); + _elem1268.read(iprot); + struct.success.add(_elem1268); } } struct.setSuccessIsSet(true); @@ -106766,13 +106911,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1238 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1238.size); - String _elem1239; - for (int _i1240 = 0; _i1240 < _list1238.size; ++_i1240) + org.apache.thrift.protocol.TList _list1270 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1270.size); + String _elem1271; + for (int _i1272 = 0; _i1272 < _list1270.size; ++_i1272) { - _elem1239 = iprot.readString(); - struct.part_vals.add(_elem1239); + _elem1271 = iprot.readString(); + struct.part_vals.add(_elem1271); } iprot.readListEnd(); } @@ -106792,13 +106937,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1241 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1241.size); - String _elem1242; - for (int _i1243 = 0; _i1243 < _list1241.size; ++_i1243) + org.apache.thrift.protocol.TList _list1273 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1273.size); + String _elem1274; + for (int _i1275 = 0; _i1275 < _list1273.size; ++_i1275) { - _elem1242 = iprot.readString(); - struct.group_names.add(_elem1242); + _elem1274 = iprot.readString(); + struct.group_names.add(_elem1274); } iprot.readListEnd(); } @@ -106834,9 +106979,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1244 : struct.part_vals) + for (String _iter1276 : struct.part_vals) { - oprot.writeString(_iter1244); + oprot.writeString(_iter1276); } oprot.writeListEnd(); } @@ -106851,9 +106996,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1245 : struct.group_names) + for (String _iter1277 : struct.group_names) { - oprot.writeString(_iter1245); + oprot.writeString(_iter1277); } oprot.writeListEnd(); } @@ -106902,9 +107047,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1246 : struct.part_vals) + for (String _iter1278 : struct.part_vals) { - oprot.writeString(_iter1246); + oprot.writeString(_iter1278); } } } @@ -106914,9 +107059,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1247 : struct.group_names) + for (String _iter1279 : struct.group_names) { - oprot.writeString(_iter1247); + oprot.writeString(_iter1279); } } } @@ -106936,13 +107081,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1248 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1248.size); - String _elem1249; - for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) + org.apache.thrift.protocol.TList _list1280 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1280.size); + String _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1249 = iprot.readString(); - struct.part_vals.add(_elem1249); + _elem1281 = iprot.readString(); + struct.part_vals.add(_elem1281); } } struct.setPart_valsIsSet(true); @@ -106953,13 +107098,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1251 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1251.size); - String _elem1252; - for (int _i1253 = 0; _i1253 < _list1251.size; ++_i1253) + org.apache.thrift.protocol.TList _list1283 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1283.size); + String _elem1284; + for (int _i1285 = 0; _i1285 < _list1283.size; ++_i1285) { - _elem1252 = iprot.readString(); - struct.group_names.add(_elem1252); + _elem1284 = iprot.readString(); + struct.group_names.add(_elem1284); } } struct.setGroup_namesIsSet(true); @@ -109728,14 +109873,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1254 = iprot.readListBegin(); - struct.success = new ArrayList(_list1254.size); - Partition _elem1255; - for (int _i1256 = 0; _i1256 < _list1254.size; ++_i1256) + org.apache.thrift.protocol.TList _list1286 = iprot.readListBegin(); + struct.success = new ArrayList(_list1286.size); + Partition _elem1287; + for (int _i1288 = 0; _i1288 < _list1286.size; ++_i1288) { - _elem1255 = new Partition(); - _elem1255.read(iprot); - struct.success.add(_elem1255); + _elem1287 = new Partition(); + _elem1287.read(iprot); + struct.success.add(_elem1287); } iprot.readListEnd(); } @@ -109779,9 +109924,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1257 : struct.success) + for (Partition _iter1289 : struct.success) { - _iter1257.write(oprot); + _iter1289.write(oprot); } oprot.writeListEnd(); } @@ -109828,9 +109973,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1258 : struct.success) + for (Partition _iter1290 : struct.success) { - _iter1258.write(oprot); + _iter1290.write(oprot); } } } @@ -109848,14 +109993,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1259 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1259.size); - Partition _elem1260; - for (int _i1261 = 0; _i1261 < _list1259.size; ++_i1261) + org.apache.thrift.protocol.TList _list1291 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1291.size); + Partition _elem1292; + for (int _i1293 = 0; _i1293 < _list1291.size; ++_i1293) { - _elem1260 = new Partition(); - _elem1260.read(iprot); - struct.success.add(_elem1260); + _elem1292 = new Partition(); + _elem1292.read(iprot); + struct.success.add(_elem1292); } } struct.setSuccessIsSet(true); @@ -110545,13 +110690,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1262 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1262.size); - String _elem1263; - for (int _i1264 = 0; _i1264 < _list1262.size; ++_i1264) + org.apache.thrift.protocol.TList _list1294 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1294.size); + String _elem1295; + for (int _i1296 = 0; _i1296 < _list1294.size; ++_i1296) { - _elem1263 = iprot.readString(); - struct.group_names.add(_elem1263); + _elem1295 = iprot.readString(); + struct.group_names.add(_elem1295); } iprot.readListEnd(); } @@ -110595,9 +110740,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1265 : struct.group_names) + for (String _iter1297 : struct.group_names) { - oprot.writeString(_iter1265); + oprot.writeString(_iter1297); } oprot.writeListEnd(); } @@ -110652,9 +110797,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1266 : struct.group_names) + for (String _iter1298 : struct.group_names) { - oprot.writeString(_iter1266); + oprot.writeString(_iter1298); } } } @@ -110682,13 +110827,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1267 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1267.size); - String _elem1268; - for (int _i1269 = 0; _i1269 < _list1267.size; ++_i1269) + org.apache.thrift.protocol.TList _list1299 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1299.size); + String _elem1300; + for (int _i1301 = 0; _i1301 < _list1299.size; ++_i1301) { - _elem1268 = iprot.readString(); - struct.group_names.add(_elem1268); + _elem1300 = iprot.readString(); + struct.group_names.add(_elem1300); } } struct.setGroup_namesIsSet(true); @@ -111175,14 +111320,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1270 = iprot.readListBegin(); - struct.success = new ArrayList(_list1270.size); - Partition _elem1271; - for (int _i1272 = 0; _i1272 < _list1270.size; 
++_i1272) + org.apache.thrift.protocol.TList _list1302 = iprot.readListBegin(); + struct.success = new ArrayList(_list1302.size); + Partition _elem1303; + for (int _i1304 = 0; _i1304 < _list1302.size; ++_i1304) { - _elem1271 = new Partition(); - _elem1271.read(iprot); - struct.success.add(_elem1271); + _elem1303 = new Partition(); + _elem1303.read(iprot); + struct.success.add(_elem1303); } iprot.readListEnd(); } @@ -111226,9 +111371,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1273 : struct.success) + for (Partition _iter1305 : struct.success) { - _iter1273.write(oprot); + _iter1305.write(oprot); } oprot.writeListEnd(); } @@ -111275,9 +111420,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1274 : struct.success) + for (Partition _iter1306 : struct.success) { - _iter1274.write(oprot); + _iter1306.write(oprot); } } } @@ -111295,14 +111440,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1275 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1275.size); - Partition _elem1276; - for (int _i1277 = 0; _i1277 < _list1275.size; ++_i1277) + org.apache.thrift.protocol.TList _list1307 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1307.size); + Partition _elem1308; + for (int _i1309 = 0; _i1309 < _list1307.size; ++_i1309) { - _elem1276 = new Partition(); - _elem1276.read(iprot); - struct.success.add(_elem1276); + _elem1308 = new Partition(); + _elem1308.read(iprot); + struct.success.add(_elem1308); } } struct.setSuccessIsSet(true); @@ -112365,14 +112510,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1278 = iprot.readListBegin(); - struct.success = new ArrayList(_list1278.size); - PartitionSpec _elem1279; - for (int _i1280 = 0; _i1280 < _list1278.size; ++_i1280) + org.apache.thrift.protocol.TList _list1310 = iprot.readListBegin(); + struct.success = new ArrayList(_list1310.size); + PartitionSpec _elem1311; + for (int _i1312 = 0; _i1312 < _list1310.size; ++_i1312) { - _elem1279 = new PartitionSpec(); - _elem1279.read(iprot); - struct.success.add(_elem1279); + _elem1311 = new PartitionSpec(); + _elem1311.read(iprot); + struct.success.add(_elem1311); } iprot.readListEnd(); } @@ -112416,9 +112561,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1281 : struct.success) + for (PartitionSpec _iter1313 : struct.success) { - _iter1281.write(oprot); + _iter1313.write(oprot); } oprot.writeListEnd(); } @@ -112465,9 +112610,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1282 : struct.success) + for (PartitionSpec _iter1314 : struct.success) { - _iter1282.write(oprot); + _iter1314.write(oprot); } } } @@ -112485,14 +112630,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1283 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1283.size); - PartitionSpec _elem1284; - for (int _i1285 = 0; _i1285 < _list1283.size; ++_i1285) + org.apache.thrift.protocol.TList _list1315 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1315.size); + PartitionSpec _elem1316; + for (int _i1317 = 0; _i1317 < _list1315.size; ++_i1317) { - _elem1284 = new PartitionSpec(); - _elem1284.read(iprot); - struct.success.add(_elem1284); + _elem1316 = new PartitionSpec(); + _elem1316.read(iprot); + struct.success.add(_elem1316); } } struct.setSuccessIsSet(true); @@ -113552,13 +113697,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1286 = iprot.readListBegin(); - struct.success = new ArrayList(_list1286.size); - String _elem1287; - for (int _i1288 = 0; _i1288 < _list1286.size; ++_i1288) + org.apache.thrift.protocol.TList _list1318 = iprot.readListBegin(); + struct.success = new ArrayList(_list1318.size); + String _elem1319; + for (int _i1320 = 0; _i1320 < _list1318.size; ++_i1320) { - _elem1287 = iprot.readString(); - struct.success.add(_elem1287); + _elem1319 = iprot.readString(); + struct.success.add(_elem1319); } iprot.readListEnd(); } @@ -113602,9 +113747,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1289 : struct.success) + for (String _iter1321 : struct.success) { - oprot.writeString(_iter1289); + oprot.writeString(_iter1321); } oprot.writeListEnd(); } @@ -113651,9 +113796,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1290 : struct.success) + for (String _iter1322 : struct.success) { - oprot.writeString(_iter1290); + oprot.writeString(_iter1322); } } } @@ -113671,13 +113816,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1291 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1291.size); - String _elem1292; - for (int _i1293 = 0; _i1293 < _list1291.size; ++_i1293) + org.apache.thrift.protocol.TList _list1323 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1323.size); + String _elem1324; + for (int _i1325 = 0; _i1325 < _list1323.size; ++_i1325) { - _elem1292 = iprot.readString(); - struct.success.add(_elem1292); + _elem1324 = iprot.readString(); + struct.success.add(_elem1324); } } 
struct.setSuccessIsSet(true); @@ -115208,13 +115353,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1294 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1294.size); - String _elem1295; - for (int _i1296 = 0; _i1296 < _list1294.size; ++_i1296) + org.apache.thrift.protocol.TList _list1326 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1326.size); + String _elem1327; + for (int _i1328 = 0; _i1328 < _list1326.size; ++_i1328) { - _elem1295 = iprot.readString(); - struct.part_vals.add(_elem1295); + _elem1327 = iprot.readString(); + struct.part_vals.add(_elem1327); } iprot.readListEnd(); } @@ -115258,9 +115403,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1297 : struct.part_vals) + for (String _iter1329 : struct.part_vals) { - oprot.writeString(_iter1297); + oprot.writeString(_iter1329); } oprot.writeListEnd(); } @@ -115309,9 +115454,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1298 : struct.part_vals) + for (String _iter1330 : struct.part_vals) { - oprot.writeString(_iter1298); + oprot.writeString(_iter1330); } } } @@ -115334,13 +115479,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1299 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1299.size); - String _elem1300; - for (int _i1301 = 0; _i1301 < _list1299.size; ++_i1301) + org.apache.thrift.protocol.TList _list1331 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1331.size); + String _elem1332; + for (int _i1333 = 0; _i1333 < _list1331.size; ++_i1333) { - _elem1300 = iprot.readString(); - struct.part_vals.add(_elem1300); + _elem1332 = iprot.readString(); + struct.part_vals.add(_elem1332); } } struct.setPart_valsIsSet(true); @@ -115831,14 +115976,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1302 = iprot.readListBegin(); - struct.success = new ArrayList(_list1302.size); - Partition _elem1303; - for (int _i1304 = 0; _i1304 < _list1302.size; ++_i1304) + org.apache.thrift.protocol.TList _list1334 = iprot.readListBegin(); + struct.success = new ArrayList(_list1334.size); + Partition _elem1335; + for (int _i1336 = 0; _i1336 < _list1334.size; ++_i1336) { - _elem1303 = new Partition(); - _elem1303.read(iprot); - struct.success.add(_elem1303); + _elem1335 = new Partition(); + _elem1335.read(iprot); + struct.success.add(_elem1335); } iprot.readListEnd(); } @@ -115882,9 +116027,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition 
_iter1305 : struct.success) + for (Partition _iter1337 : struct.success) { - _iter1305.write(oprot); + _iter1337.write(oprot); } oprot.writeListEnd(); } @@ -115931,9 +116076,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1306 : struct.success) + for (Partition _iter1338 : struct.success) { - _iter1306.write(oprot); + _iter1338.write(oprot); } } } @@ -115951,14 +116096,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1307 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1307.size); - Partition _elem1308; - for (int _i1309 = 0; _i1309 < _list1307.size; ++_i1309) + org.apache.thrift.protocol.TList _list1339 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1339.size); + Partition _elem1340; + for (int _i1341 = 0; _i1341 < _list1339.size; ++_i1341) { - _elem1308 = new Partition(); - _elem1308.read(iprot); - struct.success.add(_elem1308); + _elem1340 = new Partition(); + _elem1340.read(iprot); + struct.success.add(_elem1340); } } struct.setSuccessIsSet(true); @@ -116730,13 +116875,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1310 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1310.size); - String _elem1311; - for (int _i1312 = 0; _i1312 < _list1310.size; ++_i1312) + org.apache.thrift.protocol.TList _list1342 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1342.size); + String _elem1343; + for (int _i1344 = 0; _i1344 < _list1342.size; ++_i1344) { - _elem1311 = iprot.readString(); - struct.part_vals.add(_elem1311); + _elem1343 = iprot.readString(); + struct.part_vals.add(_elem1343); } iprot.readListEnd(); } @@ -116764,13 +116909,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1313 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1313.size); - String _elem1314; - for (int _i1315 = 0; _i1315 < _list1313.size; ++_i1315) + org.apache.thrift.protocol.TList _list1345 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1345.size); + String _elem1346; + for (int _i1347 = 0; _i1347 < _list1345.size; ++_i1347) { - _elem1314 = iprot.readString(); - struct.group_names.add(_elem1314); + _elem1346 = iprot.readString(); + struct.group_names.add(_elem1346); } iprot.readListEnd(); } @@ -116806,9 +116951,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1316 : struct.part_vals) + for (String _iter1348 : struct.part_vals) { - oprot.writeString(_iter1316); + oprot.writeString(_iter1348); } oprot.writeListEnd(); } @@ -116826,9 +116971,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1317 : struct.group_names) + for (String _iter1349 : struct.group_names) { - oprot.writeString(_iter1317); + oprot.writeString(_iter1349); } oprot.writeListEnd(); } @@ -116880,9 +117025,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1318 : struct.part_vals) + for (String _iter1350 : struct.part_vals) { - oprot.writeString(_iter1318); + oprot.writeString(_iter1350); } } } @@ -116895,9 +117040,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1319 : struct.group_names) + for (String _iter1351 : struct.group_names) { - oprot.writeString(_iter1319); + oprot.writeString(_iter1351); } } } @@ -116917,13 +117062,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1320 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1320.size); - String _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + org.apache.thrift.protocol.TList _list1352 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1352.size); + String _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1321 = iprot.readString(); - struct.part_vals.add(_elem1321); + _elem1353 = iprot.readString(); + struct.part_vals.add(_elem1353); } } struct.setPart_valsIsSet(true); @@ -116938,13 +117083,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1323 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1323.size); - String _elem1324; - for (int _i1325 = 0; _i1325 < _list1323.size; ++_i1325) + org.apache.thrift.protocol.TList _list1355 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1355.size); + String _elem1356; + for (int _i1357 = 0; _i1357 < _list1355.size; ++_i1357) { - _elem1324 = iprot.readString(); - struct.group_names.add(_elem1324); + _elem1356 = iprot.readString(); + struct.group_names.add(_elem1356); } } struct.setGroup_namesIsSet(true); @@ -117431,14 +117576,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1326 = iprot.readListBegin(); - struct.success = new ArrayList(_list1326.size); - Partition _elem1327; - for (int _i1328 = 0; _i1328 < _list1326.size; ++_i1328) + org.apache.thrift.protocol.TList _list1358 = iprot.readListBegin(); + struct.success = new ArrayList(_list1358.size); + Partition _elem1359; + for (int _i1360 = 0; _i1360 < _list1358.size; ++_i1360) { - _elem1327 = new Partition(); - _elem1327.read(iprot); - struct.success.add(_elem1327); + _elem1359 = new Partition(); + _elem1359.read(iprot); + struct.success.add(_elem1359); 
} iprot.readListEnd(); } @@ -117482,9 +117627,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1329 : struct.success) + for (Partition _iter1361 : struct.success) { - _iter1329.write(oprot); + _iter1361.write(oprot); } oprot.writeListEnd(); } @@ -117531,9 +117676,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1330 : struct.success) + for (Partition _iter1362 : struct.success) { - _iter1330.write(oprot); + _iter1362.write(oprot); } } } @@ -117551,14 +117696,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1331 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1331.size); - Partition _elem1332; - for (int _i1333 = 0; _i1333 < _list1331.size; ++_i1333) + org.apache.thrift.protocol.TList _list1363 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1363.size); + Partition _elem1364; + for (int _i1365 = 0; _i1365 < _list1363.size; ++_i1365) { - _elem1332 = new Partition(); - _elem1332.read(iprot); - struct.success.add(_elem1332); + _elem1364 = new Partition(); + _elem1364.read(iprot); + struct.success.add(_elem1364); } } struct.setSuccessIsSet(true); @@ -118151,13 +118296,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1334 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1334.size); - String _elem1335; - for (int _i1336 = 0; _i1336 < _list1334.size; ++_i1336) + org.apache.thrift.protocol.TList _list1366 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1366.size); + String _elem1367; + for (int _i1368 = 0; _i1368 < _list1366.size; ++_i1368) { - _elem1335 = iprot.readString(); - struct.part_vals.add(_elem1335); + _elem1367 = iprot.readString(); + struct.part_vals.add(_elem1367); } iprot.readListEnd(); } @@ -118201,9 +118346,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1337 : struct.part_vals) + for (String _iter1369 : struct.part_vals) { - oprot.writeString(_iter1337); + oprot.writeString(_iter1369); } oprot.writeListEnd(); } @@ -118252,9 +118397,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1338 : struct.part_vals) + for (String _iter1370 : struct.part_vals) { - oprot.writeString(_iter1338); + oprot.writeString(_iter1370); } } } @@ -118277,13 +118422,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1339 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1339.size); - String _elem1340; - for (int _i1341 = 0; _i1341 < _list1339.size; ++_i1341) + org.apache.thrift.protocol.TList _list1371 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1371.size); + String _elem1372; + for (int _i1373 = 0; _i1373 < _list1371.size; ++_i1373) { - _elem1340 = iprot.readString(); - struct.part_vals.add(_elem1340); + _elem1372 = iprot.readString(); + struct.part_vals.add(_elem1372); } } struct.setPart_valsIsSet(true); @@ -118771,13 +118916,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1342 = iprot.readListBegin(); - struct.success = new ArrayList(_list1342.size); - String _elem1343; - for (int _i1344 = 0; _i1344 < _list1342.size; ++_i1344) + org.apache.thrift.protocol.TList _list1374 = iprot.readListBegin(); + struct.success = new ArrayList(_list1374.size); + String _elem1375; + for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) { - _elem1343 = iprot.readString(); - struct.success.add(_elem1343); + _elem1375 = iprot.readString(); + struct.success.add(_elem1375); } iprot.readListEnd(); } @@ -118821,9 +118966,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1345 : struct.success) + for (String _iter1377 : struct.success) { - oprot.writeString(_iter1345); + oprot.writeString(_iter1377); } oprot.writeListEnd(); } @@ -118870,9 +119015,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1346 : struct.success) + for (String _iter1378 : struct.success) { - oprot.writeString(_iter1346); + oprot.writeString(_iter1378); } } } @@ -118890,13 +119035,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1347 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1347.size); - String _elem1348; - for (int _i1349 = 0; _i1349 < _list1347.size; ++_i1349) + org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1379.size); + String _elem1380; + for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) { - _elem1348 = iprot.readString(); - struct.success.add(_elem1348); + _elem1380 = iprot.readString(); + struct.success.add(_elem1380); } } struct.setSuccessIsSet(true); @@ -120063,14 +120208,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1350 = iprot.readListBegin(); - struct.success = new ArrayList(_list1350.size); - Partition _elem1351; - for (int _i1352 = 0; _i1352 < _list1350.size; ++_i1352) + org.apache.thrift.protocol.TList _list1382 = 
iprot.readListBegin(); + struct.success = new ArrayList(_list1382.size); + Partition _elem1383; + for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) { - _elem1351 = new Partition(); - _elem1351.read(iprot); - struct.success.add(_elem1351); + _elem1383 = new Partition(); + _elem1383.read(iprot); + struct.success.add(_elem1383); } iprot.readListEnd(); } @@ -120114,9 +120259,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1353 : struct.success) + for (Partition _iter1385 : struct.success) { - _iter1353.write(oprot); + _iter1385.write(oprot); } oprot.writeListEnd(); } @@ -120163,9 +120308,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1354 : struct.success) + for (Partition _iter1386 : struct.success) { - _iter1354.write(oprot); + _iter1386.write(oprot); } } } @@ -120183,14 +120328,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1355 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1355.size); - Partition _elem1356; - for (int _i1357 = 0; _i1357 < _list1355.size; ++_i1357) + org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1387.size); + Partition _elem1388; + for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) { - _elem1356 = new Partition(); - _elem1356.read(iprot); - struct.success.add(_elem1356); + _elem1388 = new Partition(); + _elem1388.read(iprot); + struct.success.add(_elem1388); } } struct.setSuccessIsSet(true); @@ -121357,14 +121502,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1358 = iprot.readListBegin(); - struct.success = new ArrayList(_list1358.size); - PartitionSpec _elem1359; - for (int _i1360 = 0; _i1360 < _list1358.size; ++_i1360) + org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); + struct.success = new ArrayList(_list1390.size); + PartitionSpec _elem1391; + for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) { - _elem1359 = new PartitionSpec(); - _elem1359.read(iprot); - struct.success.add(_elem1359); + _elem1391 = new PartitionSpec(); + _elem1391.read(iprot); + struct.success.add(_elem1391); } iprot.readListEnd(); } @@ -121408,9 +121553,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1361 : struct.success) + for (PartitionSpec _iter1393 : struct.success) { - _iter1361.write(oprot); + _iter1393.write(oprot); } oprot.writeListEnd(); } @@ -121457,9 +121602,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1362 : 
struct.success) + for (PartitionSpec _iter1394 : struct.success) { - _iter1362.write(oprot); + _iter1394.write(oprot); } } } @@ -121477,14 +121622,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1363 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1363.size); - PartitionSpec _elem1364; - for (int _i1365 = 0; _i1365 < _list1363.size; ++_i1365) + org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1395.size); + PartitionSpec _elem1396; + for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) { - _elem1364 = new PartitionSpec(); - _elem1364.read(iprot); - struct.success.add(_elem1364); + _elem1396 = new PartitionSpec(); + _elem1396.read(iprot); + struct.success.add(_elem1396); } } struct.setSuccessIsSet(true); @@ -124068,13 +124213,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1366 = iprot.readListBegin(); - struct.names = new ArrayList(_list1366.size); - String _elem1367; - for (int _i1368 = 0; _i1368 < _list1366.size; ++_i1368) + org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); + struct.names = new ArrayList(_list1398.size); + String _elem1399; + for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) { - _elem1367 = iprot.readString(); - struct.names.add(_elem1367); + _elem1399 = iprot.readString(); + struct.names.add(_elem1399); } iprot.readListEnd(); } @@ -124110,9 +124255,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1369 : struct.names) + for (String _iter1401 : struct.names) { - oprot.writeString(_iter1369); + oprot.writeString(_iter1401); } oprot.writeListEnd(); } @@ -124155,9 +124300,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1370 : struct.names) + for (String _iter1402 : struct.names) { - oprot.writeString(_iter1370); + oprot.writeString(_iter1402); } } } @@ -124177,13 +124322,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1371 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1371.size); - String _elem1372; - for (int _i1373 = 0; _i1373 < _list1371.size; ++_i1373) + org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1403.size); + String _elem1404; + for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) { - _elem1372 = iprot.readString(); - struct.names.add(_elem1372); + _elem1404 = iprot.readString(); + struct.names.add(_elem1404); } } struct.setNamesIsSet(true); @@ -124670,14 +124815,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS 
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1374 = iprot.readListBegin(); - struct.success = new ArrayList(_list1374.size); - Partition _elem1375; - for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) + org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); + struct.success = new ArrayList(_list1406.size); + Partition _elem1407; + for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) { - _elem1375 = new Partition(); - _elem1375.read(iprot); - struct.success.add(_elem1375); + _elem1407 = new Partition(); + _elem1407.read(iprot); + struct.success.add(_elem1407); } iprot.readListEnd(); } @@ -124721,9 +124866,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1377 : struct.success) + for (Partition _iter1409 : struct.success) { - _iter1377.write(oprot); + _iter1409.write(oprot); } oprot.writeListEnd(); } @@ -124770,9 +124915,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1378 : struct.success) + for (Partition _iter1410 : struct.success) { - _iter1378.write(oprot); + _iter1410.write(oprot); } } } @@ -124790,14 +124935,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1379.size); - Partition _elem1380; - for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) + org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1411.size); + Partition _elem1412; + for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) { - _elem1380 = new Partition(); - _elem1380.read(iprot); - struct.success.add(_elem1380); + _elem1412 = new Partition(); + _elem1412.read(iprot); + struct.success.add(_elem1412); } } struct.setSuccessIsSet(true); @@ -126347,14 +126492,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1382 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1382.size); - Partition _elem1383; - for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) + org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1414.size); + Partition _elem1415; + for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) { - _elem1383 = new Partition(); - _elem1383.read(iprot); - struct.new_parts.add(_elem1383); + _elem1415 = new Partition(); + _elem1415.read(iprot); + struct.new_parts.add(_elem1415); } iprot.readListEnd(); } @@ -126390,9 +126535,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1385 : struct.new_parts) + for (Partition _iter1417 : 
struct.new_parts) { - _iter1385.write(oprot); + _iter1417.write(oprot); } oprot.writeListEnd(); } @@ -126435,9 +126580,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1386 : struct.new_parts) + for (Partition _iter1418 : struct.new_parts) { - _iter1386.write(oprot); + _iter1418.write(oprot); } } } @@ -126457,14 +126602,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1387.size); - Partition _elem1388; - for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) + org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1419.size); + Partition _elem1420; + for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) { - _elem1388 = new Partition(); - _elem1388.read(iprot); - struct.new_parts.add(_elem1388); + _elem1420 = new Partition(); + _elem1420.read(iprot); + struct.new_parts.add(_elem1420); } } struct.setNew_partsIsSet(true); @@ -127517,14 +127662,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1390.size); - Partition _elem1391; - for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) + org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1422.size); + Partition _elem1423; + for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) { - _elem1391 = new Partition(); - _elem1391.read(iprot); - struct.new_parts.add(_elem1391); + _elem1423 = new Partition(); + _elem1423.read(iprot); + struct.new_parts.add(_elem1423); } iprot.readListEnd(); } @@ -127569,9 +127714,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1393 : struct.new_parts) + for (Partition _iter1425 : struct.new_parts) { - _iter1393.write(oprot); + _iter1425.write(oprot); } oprot.writeListEnd(); } @@ -127622,9 +127767,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1394 : struct.new_parts) + for (Partition _iter1426 : struct.new_parts) { - _iter1394.write(oprot); + _iter1426.write(oprot); } } } @@ -127647,14 +127792,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1395.size); - Partition _elem1396; - for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) + org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1427.size); + 
Partition _elem1428; + for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) { - _elem1396 = new Partition(); - _elem1396.read(iprot); - struct.new_parts.add(_elem1396); + _elem1428 = new Partition(); + _elem1428.read(iprot); + struct.new_parts.add(_elem1428); } } struct.setNew_partsIsSet(true); @@ -130793,13 +130938,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1398.size); - String _elem1399; - for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) + org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1430.size); + String _elem1431; + for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) { - _elem1399 = iprot.readString(); - struct.part_vals.add(_elem1399); + _elem1431 = iprot.readString(); + struct.part_vals.add(_elem1431); } iprot.readListEnd(); } @@ -130844,9 +130989,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1401 : struct.part_vals) + for (String _iter1433 : struct.part_vals) { - oprot.writeString(_iter1401); + oprot.writeString(_iter1433); } oprot.writeListEnd(); } @@ -130897,9 +131042,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1402 : struct.part_vals) + for (String _iter1434 : struct.part_vals) { - oprot.writeString(_iter1402); + oprot.writeString(_iter1434); } } } @@ -130922,13 +131067,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1403.size); - String _elem1404; - for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) + org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1435.size); + String _elem1436; + for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) { - _elem1404 = iprot.readString(); - struct.part_vals.add(_elem1404); + _elem1436 = iprot.readString(); + struct.part_vals.add(_elem1436); } } struct.setPart_valsIsSet(true); @@ -132740,13 +132885,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1406.size); - String _elem1407; - for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) + org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1438.size); + String _elem1439; + for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) { - _elem1407 = iprot.readString(); - struct.part_vals.add(_elem1407); + _elem1439 = iprot.readString(); + struct.part_vals.add(_elem1439); } iprot.readListEnd(); } @@ -132780,9 +132925,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1409 : struct.part_vals) + for (String _iter1441 : struct.part_vals) { - oprot.writeString(_iter1409); + oprot.writeString(_iter1441); } oprot.writeListEnd(); } @@ -132819,9 +132964,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1410 : struct.part_vals) + for (String _iter1442 : struct.part_vals) { - oprot.writeString(_iter1410); + oprot.writeString(_iter1442); } } } @@ -132836,13 +132981,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1411.size); - String _elem1412; - for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) + org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1443.size); + String _elem1444; + for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) { - _elem1412 = iprot.readString(); - struct.part_vals.add(_elem1412); + _elem1444 = iprot.readString(); + struct.part_vals.add(_elem1444); } } struct.setPart_valsIsSet(true); @@ -134997,13 +135142,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); - struct.success = new ArrayList(_list1414.size); - String _elem1415; - for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) + org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); + struct.success = new ArrayList(_list1446.size); + String _elem1447; + for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) { - _elem1415 = iprot.readString(); - struct.success.add(_elem1415); + _elem1447 = iprot.readString(); + struct.success.add(_elem1447); } iprot.readListEnd(); } @@ -135038,9 +135183,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1417 : struct.success) + for (String _iter1449 : struct.success) { - oprot.writeString(_iter1417); + oprot.writeString(_iter1449); } oprot.writeListEnd(); } @@ -135079,9 +135224,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1418 : struct.success) + for (String _iter1450 : struct.success) { - oprot.writeString(_iter1418); + oprot.writeString(_iter1450); } } } @@ -135096,13 +135241,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new 
ArrayList(_list1419.size); - String _elem1420; - for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) + org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1451.size); + String _elem1452; + for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) { - _elem1420 = iprot.readString(); - struct.success.add(_elem1420); + _elem1452 = iprot.readString(); + struct.success.add(_elem1452); } } struct.setSuccessIsSet(true); @@ -135865,15 +136010,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1422 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1422.size); - String _key1423; - String _val1424; - for (int _i1425 = 0; _i1425 < _map1422.size; ++_i1425) + org.apache.thrift.protocol.TMap _map1454 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1454.size); + String _key1455; + String _val1456; + for (int _i1457 = 0; _i1457 < _map1454.size; ++_i1457) { - _key1423 = iprot.readString(); - _val1424 = iprot.readString(); - struct.success.put(_key1423, _val1424); + _key1455 = iprot.readString(); + _val1456 = iprot.readString(); + struct.success.put(_key1455, _val1456); } iprot.readMapEnd(); } @@ -135908,10 +136053,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1426 : struct.success.entrySet()) + for (Map.Entry _iter1458 : struct.success.entrySet()) { - oprot.writeString(_iter1426.getKey()); - oprot.writeString(_iter1426.getValue()); + oprot.writeString(_iter1458.getKey()); + oprot.writeString(_iter1458.getValue()); } oprot.writeMapEnd(); } @@ -135950,10 +136095,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1427 : struct.success.entrySet()) + for (Map.Entry _iter1459 : struct.success.entrySet()) { - oprot.writeString(_iter1427.getKey()); - oprot.writeString(_iter1427.getValue()); + oprot.writeString(_iter1459.getKey()); + oprot.writeString(_iter1459.getValue()); } } } @@ -135968,15 +136113,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1428 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1428.size); - String _key1429; - String _val1430; - for (int _i1431 = 0; _i1431 < _map1428.size; ++_i1431) + org.apache.thrift.protocol.TMap _map1460 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1460.size); + String _key1461; + String _val1462; + for (int _i1463 = 0; _i1463 < _map1460.size; ++_i1463) { - _key1429 = iprot.readString(); - _val1430 = iprot.readString(); - struct.success.put(_key1429, _val1430); + _key1461 = iprot.readString(); + _val1462 = iprot.readString(); + struct.success.put(_key1461, _val1462); } } 
struct.setSuccessIsSet(true); @@ -136571,15 +136716,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1432 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1432.size); - String _key1433; - String _val1434; - for (int _i1435 = 0; _i1435 < _map1432.size; ++_i1435) + org.apache.thrift.protocol.TMap _map1464 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1464.size); + String _key1465; + String _val1466; + for (int _i1467 = 0; _i1467 < _map1464.size; ++_i1467) { - _key1433 = iprot.readString(); - _val1434 = iprot.readString(); - struct.part_vals.put(_key1433, _val1434); + _key1465 = iprot.readString(); + _val1466 = iprot.readString(); + struct.part_vals.put(_key1465, _val1466); } iprot.readMapEnd(); } @@ -136623,10 +136768,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1436 : struct.part_vals.entrySet()) + for (Map.Entry _iter1468 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1436.getKey()); - oprot.writeString(_iter1436.getValue()); + oprot.writeString(_iter1468.getKey()); + oprot.writeString(_iter1468.getValue()); } oprot.writeMapEnd(); } @@ -136677,10 +136822,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1437 : struct.part_vals.entrySet()) + for (Map.Entry _iter1469 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1437.getKey()); - oprot.writeString(_iter1437.getValue()); + oprot.writeString(_iter1469.getKey()); + oprot.writeString(_iter1469.getValue()); } } } @@ -136703,15 +136848,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1438 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1438.size); - String _key1439; - String _val1440; - for (int _i1441 = 0; _i1441 < _map1438.size; ++_i1441) + org.apache.thrift.protocol.TMap _map1470 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1470.size); + String _key1471; + String _val1472; + for (int _i1473 = 0; _i1473 < _map1470.size; ++_i1473) { - _key1439 = iprot.readString(); - _val1440 = iprot.readString(); - struct.part_vals.put(_key1439, _val1440); + _key1471 = iprot.readString(); + _val1472 = iprot.readString(); + struct.part_vals.put(_key1471, _val1472); } } struct.setPart_valsIsSet(true); @@ -138195,15 +138340,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1442 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1442.size); - String _key1443; - String _val1444; - for (int _i1445 = 0; _i1445 < _map1442.size; ++_i1445) + org.apache.thrift.protocol.TMap _map1474 = iprot.readMapBegin(); + struct.part_vals = new 
HashMap(2*_map1474.size); + String _key1475; + String _val1476; + for (int _i1477 = 0; _i1477 < _map1474.size; ++_i1477) { - _key1443 = iprot.readString(); - _val1444 = iprot.readString(); - struct.part_vals.put(_key1443, _val1444); + _key1475 = iprot.readString(); + _val1476 = iprot.readString(); + struct.part_vals.put(_key1475, _val1476); } iprot.readMapEnd(); } @@ -138247,10 +138392,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1446 : struct.part_vals.entrySet()) + for (Map.Entry _iter1478 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1446.getKey()); - oprot.writeString(_iter1446.getValue()); + oprot.writeString(_iter1478.getKey()); + oprot.writeString(_iter1478.getValue()); } oprot.writeMapEnd(); } @@ -138301,10 +138446,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1447 : struct.part_vals.entrySet()) + for (Map.Entry _iter1479 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1447.getKey()); - oprot.writeString(_iter1447.getValue()); + oprot.writeString(_iter1479.getKey()); + oprot.writeString(_iter1479.getValue()); } } } @@ -138327,15 +138472,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1448 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1448.size); - String _key1449; - String _val1450; - for (int _i1451 = 0; _i1451 < _map1448.size; ++_i1451) + org.apache.thrift.protocol.TMap _map1480 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1480.size); + String _key1481; + String _val1482; + for (int _i1483 = 0; _i1483 < _map1480.size; ++_i1483) { - _key1449 = iprot.readString(); - _val1450 = iprot.readString(); - struct.part_vals.put(_key1449, _val1450); + _key1481 = iprot.readString(); + _val1482 = iprot.readString(); + struct.part_vals.put(_key1481, _val1482); } } struct.setPart_valsIsSet(true); @@ -162991,13 +163136,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1452 = iprot.readListBegin(); - struct.success = new ArrayList(_list1452.size); - String _elem1453; - for (int _i1454 = 0; _i1454 < _list1452.size; ++_i1454) + org.apache.thrift.protocol.TList _list1484 = iprot.readListBegin(); + struct.success = new ArrayList(_list1484.size); + String _elem1485; + for (int _i1486 = 0; _i1486 < _list1484.size; ++_i1486) { - _elem1453 = iprot.readString(); - struct.success.add(_elem1453); + _elem1485 = iprot.readString(); + struct.success.add(_elem1485); } iprot.readListEnd(); } @@ -163032,9 +163177,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - 
for (String _iter1455 : struct.success) + for (String _iter1487 : struct.success) { - oprot.writeString(_iter1455); + oprot.writeString(_iter1487); } oprot.writeListEnd(); } @@ -163073,9 +163218,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1456 : struct.success) + for (String _iter1488 : struct.success) { - oprot.writeString(_iter1456); + oprot.writeString(_iter1488); } } } @@ -163090,13 +163235,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1457 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1457.size); - String _elem1458; - for (int _i1459 = 0; _i1459 < _list1457.size; ++_i1459) + org.apache.thrift.protocol.TList _list1489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1489.size); + String _elem1490; + for (int _i1491 = 0; _i1491 < _list1489.size; ++_i1491) { - _elem1458 = iprot.readString(); - struct.success.add(_elem1458); + _elem1490 = iprot.readString(); + struct.success.add(_elem1490); } } struct.setSuccessIsSet(true); @@ -167151,13 +167296,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1460 = iprot.readListBegin(); - struct.success = new ArrayList(_list1460.size); - String _elem1461; - for (int _i1462 = 0; _i1462 < _list1460.size; ++_i1462) + org.apache.thrift.protocol.TList _list1492 = iprot.readListBegin(); + struct.success = new ArrayList(_list1492.size); + String _elem1493; + for (int _i1494 = 0; _i1494 < _list1492.size; ++_i1494) { - _elem1461 = iprot.readString(); - struct.success.add(_elem1461); + _elem1493 = iprot.readString(); + struct.success.add(_elem1493); } iprot.readListEnd(); } @@ -167192,9 +167337,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1463 : struct.success) + for (String _iter1495 : struct.success) { - oprot.writeString(_iter1463); + oprot.writeString(_iter1495); } oprot.writeListEnd(); } @@ -167233,9 +167378,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1464 : struct.success) + for (String _iter1496 : struct.success) { - oprot.writeString(_iter1464); + oprot.writeString(_iter1496); } } } @@ -167250,13 +167395,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1465.size); - String _elem1466; - for (int _i1467 = 0; _i1467 < _list1465.size; ++_i1467) + org.apache.thrift.protocol.TList _list1497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list1497.size); + String _elem1498; + for (int _i1499 = 0; _i1499 < _list1497.size; ++_i1499) { - _elem1466 = iprot.readString(); - struct.success.add(_elem1466); + _elem1498 = iprot.readString(); + struct.success.add(_elem1498); } } struct.setSuccessIsSet(true); @@ -170547,14 +170692,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1468 = iprot.readListBegin(); - struct.success = new ArrayList(_list1468.size); - Role _elem1469; - for (int _i1470 = 0; _i1470 < _list1468.size; ++_i1470) + org.apache.thrift.protocol.TList _list1500 = iprot.readListBegin(); + struct.success = new ArrayList(_list1500.size); + Role _elem1501; + for (int _i1502 = 0; _i1502 < _list1500.size; ++_i1502) { - _elem1469 = new Role(); - _elem1469.read(iprot); - struct.success.add(_elem1469); + _elem1501 = new Role(); + _elem1501.read(iprot); + struct.success.add(_elem1501); } iprot.readListEnd(); } @@ -170589,9 +170734,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1471 : struct.success) + for (Role _iter1503 : struct.success) { - _iter1471.write(oprot); + _iter1503.write(oprot); } oprot.writeListEnd(); } @@ -170630,9 +170775,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1472 : struct.success) + for (Role _iter1504 : struct.success) { - _iter1472.write(oprot); + _iter1504.write(oprot); } } } @@ -170647,14 +170792,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1473 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1473.size); - Role _elem1474; - for (int _i1475 = 0; _i1475 < _list1473.size; ++_i1475) + org.apache.thrift.protocol.TList _list1505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1505.size); + Role _elem1506; + for (int _i1507 = 0; _i1507 < _list1505.size; ++_i1507) { - _elem1474 = new Role(); - _elem1474.read(iprot); - struct.success.add(_elem1474); + _elem1506 = new Role(); + _elem1506.read(iprot); + struct.success.add(_elem1506); } } struct.setSuccessIsSet(true); @@ -173659,13 +173804,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1476 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1476.size); - String _elem1477; - for (int _i1478 = 0; _i1478 < _list1476.size; ++_i1478) + org.apache.thrift.protocol.TList _list1508 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1508.size); + String _elem1509; + for (int _i1510 = 0; _i1510 < _list1508.size; ++_i1510) { - _elem1477 = iprot.readString(); - struct.group_names.add(_elem1477); + _elem1509 = iprot.readString(); + struct.group_names.add(_elem1509); } iprot.readListEnd(); } @@ -173701,9 +173846,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1479 : struct.group_names) + for (String _iter1511 : struct.group_names) { - oprot.writeString(_iter1479); + oprot.writeString(_iter1511); } oprot.writeListEnd(); } @@ -173746,9 +173891,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1480 : struct.group_names) + for (String _iter1512 : struct.group_names) { - oprot.writeString(_iter1480); + oprot.writeString(_iter1512); } } } @@ -173769,13 +173914,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1481 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1481.size); - String _elem1482; - for (int _i1483 = 0; _i1483 < _list1481.size; ++_i1483) + org.apache.thrift.protocol.TList _list1513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1513.size); + String _elem1514; + for (int _i1515 = 0; _i1515 < _list1513.size; ++_i1515) { - _elem1482 = iprot.readString(); - struct.group_names.add(_elem1482); + _elem1514 = iprot.readString(); + struct.group_names.add(_elem1514); } } struct.setGroup_namesIsSet(true); @@ -175233,14 +175378,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1484 = iprot.readListBegin(); - struct.success = new ArrayList(_list1484.size); - HiveObjectPrivilege _elem1485; - for (int _i1486 = 0; _i1486 < _list1484.size; ++_i1486) + org.apache.thrift.protocol.TList _list1516 = iprot.readListBegin(); + struct.success = new ArrayList(_list1516.size); + HiveObjectPrivilege _elem1517; + for (int _i1518 = 0; _i1518 < _list1516.size; ++_i1518) { - _elem1485 = new HiveObjectPrivilege(); - _elem1485.read(iprot); - struct.success.add(_elem1485); + _elem1517 = new HiveObjectPrivilege(); + _elem1517.read(iprot); + struct.success.add(_elem1517); } iprot.readListEnd(); } @@ -175275,9 +175420,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1487 : struct.success) + for (HiveObjectPrivilege _iter1519 : struct.success) { - _iter1487.write(oprot); + _iter1519.write(oprot); } oprot.writeListEnd(); } @@ -175316,9 +175461,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1488 : struct.success) + for (HiveObjectPrivilege _iter1520 : struct.success) { - _iter1488.write(oprot); + _iter1520.write(oprot); } } } @@ -175333,14 +175478,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1489 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1489.size); - HiveObjectPrivilege _elem1490; - for (int _i1491 = 0; _i1491 < _list1489.size; ++_i1491) + org.apache.thrift.protocol.TList _list1521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1521.size); + HiveObjectPrivilege _elem1522; + for (int _i1523 = 0; _i1523 < _list1521.size; ++_i1523) { - _elem1490 = new HiveObjectPrivilege(); - _elem1490.read(iprot); - struct.success.add(_elem1490); + _elem1522 = new HiveObjectPrivilege(); + _elem1522.read(iprot); + struct.success.add(_elem1522); } } struct.setSuccessIsSet(true); @@ -179287,13 +179432,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1492 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1492.size); - String _elem1493; - for (int _i1494 = 0; _i1494 < _list1492.size; ++_i1494) + org.apache.thrift.protocol.TList _list1524 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1524.size); + String _elem1525; + for (int _i1526 = 0; _i1526 < _list1524.size; ++_i1526) { - _elem1493 = iprot.readString(); - struct.group_names.add(_elem1493); + _elem1525 = iprot.readString(); + struct.group_names.add(_elem1525); } iprot.readListEnd(); } @@ -179324,9 +179469,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1495 : struct.group_names) + for (String _iter1527 : struct.group_names) { - oprot.writeString(_iter1495); + oprot.writeString(_iter1527); } oprot.writeListEnd(); } @@ -179363,9 +179508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1496 : struct.group_names) + for (String _iter1528 : struct.group_names) { - oprot.writeString(_iter1496); + oprot.writeString(_iter1528); } } } @@ -179381,13 +179526,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1497.size); - String _elem1498; - for (int _i1499 = 0; _i1499 < _list1497.size; ++_i1499) + org.apache.thrift.protocol.TList _list1529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1529.size); + String _elem1530; + for (int _i1531 = 0; _i1531 < _list1529.size; ++_i1531) { - _elem1498 = iprot.readString(); - struct.group_names.add(_elem1498); + _elem1530 = iprot.readString(); + struct.group_names.add(_elem1530); } } struct.setGroup_namesIsSet(true); @@ -179790,13 +179935,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1500 = iprot.readListBegin(); - struct.success = new ArrayList(_list1500.size); - String _elem1501; - for (int 
_i1502 = 0; _i1502 < _list1500.size; ++_i1502) + org.apache.thrift.protocol.TList _list1532 = iprot.readListBegin(); + struct.success = new ArrayList(_list1532.size); + String _elem1533; + for (int _i1534 = 0; _i1534 < _list1532.size; ++_i1534) { - _elem1501 = iprot.readString(); - struct.success.add(_elem1501); + _elem1533 = iprot.readString(); + struct.success.add(_elem1533); } iprot.readListEnd(); } @@ -179831,9 +179976,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1503 : struct.success) + for (String _iter1535 : struct.success) { - oprot.writeString(_iter1503); + oprot.writeString(_iter1535); } oprot.writeListEnd(); } @@ -179872,9 +180017,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1504 : struct.success) + for (String _iter1536 : struct.success) { - oprot.writeString(_iter1504); + oprot.writeString(_iter1536); } } } @@ -179889,13 +180034,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1505.size); - String _elem1506; - for (int _i1507 = 0; _i1507 < _list1505.size; ++_i1507) + org.apache.thrift.protocol.TList _list1537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1537.size); + String _elem1538; + for (int _i1539 = 0; _i1539 < _list1537.size; ++_i1539) { - _elem1506 = iprot.readString(); - struct.success.add(_elem1506); + _elem1538 = iprot.readString(); + struct.success.add(_elem1538); } } struct.setSuccessIsSet(true); @@ -185186,13 +185331,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1508 = iprot.readListBegin(); - struct.success = new ArrayList(_list1508.size); - String _elem1509; - for (int _i1510 = 0; _i1510 < _list1508.size; ++_i1510) + org.apache.thrift.protocol.TList _list1540 = iprot.readListBegin(); + struct.success = new ArrayList(_list1540.size); + String _elem1541; + for (int _i1542 = 0; _i1542 < _list1540.size; ++_i1542) { - _elem1509 = iprot.readString(); - struct.success.add(_elem1509); + _elem1541 = iprot.readString(); + struct.success.add(_elem1541); } iprot.readListEnd(); } @@ -185218,9 +185363,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1511 : struct.success) + for (String _iter1543 : struct.success) { - oprot.writeString(_iter1511); + oprot.writeString(_iter1543); } oprot.writeListEnd(); } @@ -185251,9 +185396,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1512 : struct.success) + for (String _iter1544 : struct.success) { - 
oprot.writeString(_iter1512); + oprot.writeString(_iter1544); } } } @@ -185265,13 +185410,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1513.size); - String _elem1514; - for (int _i1515 = 0; _i1515 < _list1513.size; ++_i1515) + org.apache.thrift.protocol.TList _list1545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1545.size); + String _elem1546; + for (int _i1547 = 0; _i1547 < _list1545.size; ++_i1547) { - _elem1514 = iprot.readString(); - struct.success.add(_elem1514); + _elem1546 = iprot.readString(); + struct.success.add(_elem1546); } } struct.setSuccessIsSet(true); @@ -188301,13 +188446,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1516 = iprot.readListBegin(); - struct.success = new ArrayList(_list1516.size); - String _elem1517; - for (int _i1518 = 0; _i1518 < _list1516.size; ++_i1518) + org.apache.thrift.protocol.TList _list1548 = iprot.readListBegin(); + struct.success = new ArrayList(_list1548.size); + String _elem1549; + for (int _i1550 = 0; _i1550 < _list1548.size; ++_i1550) { - _elem1517 = iprot.readString(); - struct.success.add(_elem1517); + _elem1549 = iprot.readString(); + struct.success.add(_elem1549); } iprot.readListEnd(); } @@ -188333,9 +188478,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1519 : struct.success) + for (String _iter1551 : struct.success) { - oprot.writeString(_iter1519); + oprot.writeString(_iter1551); } oprot.writeListEnd(); } @@ -188366,9 +188511,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1520 : struct.success) + for (String _iter1552 : struct.success) { - oprot.writeString(_iter1520); + oprot.writeString(_iter1552); } } } @@ -188380,13 +188525,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1521.size); - String _elem1522; - for (int _i1523 = 0; _i1523 < _list1521.size; ++_i1523) + org.apache.thrift.protocol.TList _list1553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1553.size); + String _elem1554; + for (int _i1555 = 0; _i1555 < _list1553.size; ++_i1555) { - _elem1522 = iprot.readString(); - struct.success.add(_elem1522); + _elem1554 = iprot.readString(); + struct.success.add(_elem1554); } } struct.setSuccessIsSet(true); @@ -237295,14 +237440,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1524 = iprot.readListBegin(); - struct.success = new ArrayList(_list1524.size); - SchemaVersion _elem1525; - for (int _i1526 = 0; _i1526 < _list1524.size; ++_i1526) + org.apache.thrift.protocol.TList _list1556 = iprot.readListBegin(); + struct.success = new ArrayList(_list1556.size); + SchemaVersion _elem1557; + for (int _i1558 = 0; _i1558 < _list1556.size; ++_i1558) { - _elem1525 = new SchemaVersion(); - _elem1525.read(iprot); - struct.success.add(_elem1525); + _elem1557 = new SchemaVersion(); + _elem1557.read(iprot); + struct.success.add(_elem1557); } iprot.readListEnd(); } @@ -237346,9 +237491,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1527 : struct.success) + for (SchemaVersion _iter1559 : struct.success) { - _iter1527.write(oprot); + _iter1559.write(oprot); } oprot.writeListEnd(); } @@ -237395,9 +237540,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1528 : struct.success) + for (SchemaVersion _iter1560 : struct.success) { - _iter1528.write(oprot); + _iter1560.write(oprot); } } } @@ -237415,14 +237560,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1529.size); - SchemaVersion _elem1530; - for (int _i1531 = 0; _i1531 < _list1529.size; ++_i1531) + org.apache.thrift.protocol.TList _list1561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1561.size); + SchemaVersion _elem1562; + for (int _i1563 = 0; _i1563 < _list1561.size; ++_i1563) { - _elem1530 = new SchemaVersion(); - _elem1530.read(iprot); - struct.success.add(_elem1530); + _elem1562 = new SchemaVersion(); + _elem1562.read(iprot); + struct.success.add(_elem1562); } } struct.setSuccessIsSet(true); @@ -244366,11 +244511,734 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("heartbeat_lock_materialization_rebuild_result("); + StringBuilder sb = new StringBuilder("heartbeat_lock_materialization_rebuild_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class heartbeat_lock_materialization_rebuild_resultStandardSchemeFactory implements SchemeFactory { + public heartbeat_lock_materialization_rebuild_resultStandardScheme getScheme() { + return new heartbeat_lock_materialization_rebuild_resultStandardScheme(); + } + } + + private static class heartbeat_lock_materialization_rebuild_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class heartbeat_lock_materialization_rebuild_resultTupleSchemeFactory implements SchemeFactory { + public heartbeat_lock_materialization_rebuild_resultTupleScheme getScheme() { + return new heartbeat_lock_materialization_rebuild_resultTupleScheme(); + } + } + + private static class heartbeat_lock_materialization_rebuild_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if 
(struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_runtime_stats_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_runtime_stats_args"); + + private static final org.apache.thrift.protocol.TField STAT_FIELD_DESC = new org.apache.thrift.protocol.TField("stat", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_runtime_stats_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_runtime_stats_argsTupleSchemeFactory()); + } + + private RuntimeStat stat; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + STAT((short)1, "stat"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // STAT + return STAT; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STAT, new org.apache.thrift.meta_data.FieldMetaData("stat", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RuntimeStat.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_runtime_stats_args.class, metaDataMap); + } + + public add_runtime_stats_args() { + } + + public add_runtime_stats_args( + RuntimeStat stat) + { + this(); + this.stat = stat; + } + + /** + * Performs a deep copy on other. + */ + public add_runtime_stats_args(add_runtime_stats_args other) { + if (other.isSetStat()) { + this.stat = new RuntimeStat(other.stat); + } + } + + public add_runtime_stats_args deepCopy() { + return new add_runtime_stats_args(this); + } + + @Override + public void clear() { + this.stat = null; + } + + public RuntimeStat getStat() { + return this.stat; + } + + public void setStat(RuntimeStat stat) { + this.stat = stat; + } + + public void unsetStat() { + this.stat = null; + } + + /** Returns true if field stat is set (has been assigned a value) and false otherwise */ + public boolean isSetStat() { + return this.stat != null; + } + + public void setStatIsSet(boolean value) { + if (!value) { + this.stat = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STAT: + if (value == null) { + unsetStat(); + } else { + setStat((RuntimeStat)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STAT: + return getStat(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STAT: + return isSetStat(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_runtime_stats_args) + return this.equals((add_runtime_stats_args)that); + return false; + } + + public boolean equals(add_runtime_stats_args that) { + if (that == null) + return false; + + boolean this_present_stat = true && this.isSetStat(); + boolean that_present_stat = true && that.isSetStat(); + if (this_present_stat || that_present_stat) { + if (!(this_present_stat && that_present_stat)) + return false; + if (!this.stat.equals(that.stat)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_stat = true && (isSetStat()); + list.add(present_stat); + if (present_stat) + list.add(stat); + + return list.hashCode(); + } + + @Override + public int compareTo(add_runtime_stats_args other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetStat()).compareTo(other.isSetStat()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStat()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stat, other.stat); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_runtime_stats_args("); + boolean first = true; + + sb.append("stat:"); + if (this.stat == null) { + sb.append("null"); + } else { + sb.append(this.stat); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (stat != null) { + stat.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class add_runtime_stats_argsStandardSchemeFactory implements SchemeFactory { + public add_runtime_stats_argsStandardScheme getScheme() { + return new add_runtime_stats_argsStandardScheme(); + } + } + + private static class add_runtime_stats_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STAT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.stat = new RuntimeStat(); + struct.stat.read(iprot); + struct.setStatIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.stat != null) { + oprot.writeFieldBegin(STAT_FIELD_DESC); + struct.stat.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class 
add_runtime_stats_argsTupleSchemeFactory implements SchemeFactory { + public add_runtime_stats_argsTupleScheme getScheme() { + return new add_runtime_stats_argsTupleScheme(); + } + } + + private static class add_runtime_stats_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetStat()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetStat()) { + struct.stat.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.stat = new RuntimeStat(); + struct.stat.read(iprot); + struct.setStatIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_runtime_stats_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_runtime_stats_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_runtime_stats_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_runtime_stats_resultTupleSchemeFactory()); + } + + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_runtime_stats_result.class, metaDataMap); + } + + public add_runtime_stats_result() { + } + + public add_runtime_stats_result( + MetaException o1) + { + this(); + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public add_runtime_stats_result(add_runtime_stats_result other) { + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public add_runtime_stats_result deepCopy() { + return new add_runtime_stats_result(this); + } + + @Override + public void clear() { + this.o1 = null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_runtime_stats_result) + return this.equals((add_runtime_stats_result)that); + return false; + } + + public boolean equals(add_runtime_stats_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(add_runtime_stats_result other) { + if (!getClass().equals(other.getClass())) { + return 
getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_runtime_stats_result("); boolean first = true; - sb.append("success:"); - sb.append(this.success); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } first = false; sb.append(")"); return sb.toString(); @@ -244391,23 +245259,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class heartbeat_lock_materialization_rebuild_resultStandardSchemeFactory implements SchemeFactory { - public heartbeat_lock_materialization_rebuild_resultStandardScheme getScheme() { - return new heartbeat_lock_materialization_rebuild_resultStandardScheme(); + private static class add_runtime_stats_resultStandardSchemeFactory implements SchemeFactory { + public add_runtime_stats_resultStandardScheme getScheme() { + return new add_runtime_stats_resultStandardScheme(); } } - private static class heartbeat_lock_materialization_rebuild_resultStandardScheme extends StandardScheme { + private static class add_runtime_stats_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -244417,10 +245283,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_lock_mate break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); - struct.setSuccessIsSet(true); + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -244434,13 +245301,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_lock_mate struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol 
oprot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_runtime_stats_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -244449,56 +245316,57 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_lock_mat } - private static class heartbeat_lock_materialization_rebuild_resultTupleSchemeFactory implements SchemeFactory { - public heartbeat_lock_materialization_rebuild_resultTupleScheme getScheme() { - return new heartbeat_lock_materialization_rebuild_resultTupleScheme(); + private static class add_runtime_stats_resultTupleSchemeFactory implements SchemeFactory { + public add_runtime_stats_resultTupleScheme getScheme() { + return new add_runtime_stats_resultTupleScheme(); } } - private static class heartbeat_lock_materialization_rebuild_resultTupleScheme extends TupleScheme { + private static class add_runtime_stats_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { + if (struct.isSetO1()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - oprot.writeBool(struct.success); + if (struct.isSetO1()) { + struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_lock_materialization_rebuild_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = iprot.readBool(); - struct.setSuccessIsSet(true); + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_runtime_stats_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_runtime_stats_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_runtime_stats_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_runtime_stats_args"); - private static final org.apache.thrift.protocol.TField STAT_FIELD_DESC = new org.apache.thrift.protocol.TField("stat", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new add_runtime_stats_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new add_runtime_stats_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_runtime_stats_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_runtime_stats_argsTupleSchemeFactory()); } - private RuntimeStat stat; // required + private GetRuntimeStatsRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STAT((short)1, "stat"); + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -244513,8 +245381,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_lock_mater */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // STAT - return STAT; + case 1: // RQST + return RQST; default: return null; } @@ -244558,70 +245426,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STAT, new org.apache.thrift.meta_data.FieldMetaData("stat", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RuntimeStat.class))); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetRuntimeStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_runtime_stats_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_runtime_stats_args.class, metaDataMap); } - public add_runtime_stats_args() { + public get_runtime_stats_args() { } - public add_runtime_stats_args( - RuntimeStat stat) + public get_runtime_stats_args( + GetRuntimeStatsRequest rqst) { this(); - this.stat = stat; + this.rqst = rqst; } /** * Performs a deep copy on other. 
*/ - public add_runtime_stats_args(add_runtime_stats_args other) { - if (other.isSetStat()) { - this.stat = new RuntimeStat(other.stat); + public get_runtime_stats_args(get_runtime_stats_args other) { + if (other.isSetRqst()) { + this.rqst = new GetRuntimeStatsRequest(other.rqst); } } - public add_runtime_stats_args deepCopy() { - return new add_runtime_stats_args(this); + public get_runtime_stats_args deepCopy() { + return new get_runtime_stats_args(this); } @Override public void clear() { - this.stat = null; + this.rqst = null; } - public RuntimeStat getStat() { - return this.stat; + public GetRuntimeStatsRequest getRqst() { + return this.rqst; } - public void setStat(RuntimeStat stat) { - this.stat = stat; + public void setRqst(GetRuntimeStatsRequest rqst) { + this.rqst = rqst; } - public void unsetStat() { - this.stat = null; + public void unsetRqst() { + this.rqst = null; } - /** Returns true if field stat is set (has been assigned a value) and false otherwise */ - public boolean isSetStat() { - return this.stat != null; + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; } - public void setStatIsSet(boolean value) { + public void setRqstIsSet(boolean value) { if (!value) { - this.stat = null; + this.rqst = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case STAT: + case RQST: if (value == null) { - unsetStat(); + unsetRqst(); } else { - setStat((RuntimeStat)value); + setRqst((GetRuntimeStatsRequest)value); } break; @@ -244630,8 +245498,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case STAT: - return getStat(); + case RQST: + return getRqst(); } throw new IllegalStateException(); @@ -244644,8 +245512,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case STAT: - return isSetStat(); + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -244654,21 +245522,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof add_runtime_stats_args) - return this.equals((add_runtime_stats_args)that); + if (that instanceof get_runtime_stats_args) + return this.equals((get_runtime_stats_args)that); return false; } - public boolean equals(add_runtime_stats_args that) { + public boolean equals(get_runtime_stats_args that) { if (that == null) return false; - boolean this_present_stat = true && this.isSetStat(); - boolean that_present_stat = true && that.isSetStat(); - if (this_present_stat || that_present_stat) { - if (!(this_present_stat && that_present_stat)) + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) return false; - if (!this.stat.equals(that.stat)) + if (!this.rqst.equals(that.rqst)) return false; } @@ -244679,28 +245547,28 @@ public boolean equals(add_runtime_stats_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_stat = true && (isSetStat()); - list.add(present_stat); - if (present_stat) - list.add(stat); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); return list.hashCode(); } @Override - public int compareTo(add_runtime_stats_args other) { + public int compareTo(get_runtime_stats_args other) { if 
(!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetStat()).compareTo(other.isSetStat()); + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); if (lastComparison != 0) { return lastComparison; } - if (isSetStat()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stat, other.stat); + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); if (lastComparison != 0) { return lastComparison; } @@ -244722,14 +245590,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("add_runtime_stats_args("); + StringBuilder sb = new StringBuilder("get_runtime_stats_args("); boolean first = true; - sb.append("stat:"); - if (this.stat == null) { + sb.append("rqst:"); + if (this.rqst == null) { sb.append("null"); } else { - sb.append(this.stat); + sb.append(this.rqst); } first = false; sb.append(")"); @@ -244739,8 +245607,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (stat != null) { - stat.validate(); + if (rqst != null) { + rqst.validate(); } } @@ -244760,15 +245628,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_runtime_stats_argsStandardSchemeFactory implements SchemeFactory { - public add_runtime_stats_argsStandardScheme getScheme() { - return new add_runtime_stats_argsStandardScheme(); + private static class get_runtime_stats_argsStandardSchemeFactory implements SchemeFactory { + public get_runtime_stats_argsStandardScheme getScheme() { + return new get_runtime_stats_argsStandardScheme(); } } - private static class add_runtime_stats_argsStandardScheme extends StandardScheme { + private static class get_runtime_stats_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -244778,11 +245646,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_a break; } switch (schemeField.id) { - case 1: // STAT + case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.stat = new RuntimeStat(); - struct.stat.read(iprot); - struct.setStatIsSet(true); + struct.rqst = new GetRuntimeStatsRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -244796,13 +245664,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.stat != null) { - oprot.writeFieldBegin(STAT_FIELD_DESC); - struct.stat.write(oprot); + if (struct.rqst != null) { + 
oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -244811,56 +245679,59 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_runtime_stats_ } - private static class add_runtime_stats_argsTupleSchemeFactory implements SchemeFactory { - public add_runtime_stats_argsTupleScheme getScheme() { - return new add_runtime_stats_argsTupleScheme(); + private static class get_runtime_stats_argsTupleSchemeFactory implements SchemeFactory { + public get_runtime_stats_argsTupleScheme getScheme() { + return new get_runtime_stats_argsTupleScheme(); } } - private static class add_runtime_stats_argsTupleScheme extends TupleScheme { + private static class get_runtime_stats_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetStat()) { + if (struct.isSetRqst()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetStat()) { - struct.stat.write(oprot); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.stat = new RuntimeStat(); - struct.stat.read(iprot); - struct.setStatIsSet(true); + struct.rqst = new GetRuntimeStatsRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_runtime_stats_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_runtime_stats_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_runtime_stats_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_runtime_stats_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new add_runtime_stats_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new add_runtime_stats_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_runtime_stats_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_runtime_stats_resultTupleSchemeFactory()); } + private List success; // required private 
MetaException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -244876,6 +245747,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_ar */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; case 1: // O1 return O1; default: @@ -244921,40 +245794,91 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RuntimeStat.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_runtime_stats_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_runtime_stats_result.class, metaDataMap); } - public add_runtime_stats_result() { + public get_runtime_stats_result() { } - public add_runtime_stats_result( + public get_runtime_stats_result( + List success, MetaException o1) { this(); + this.success = success; this.o1 = o1; } /** * Performs a deep copy on other. */ - public add_runtime_stats_result(add_runtime_stats_result other) { + public get_runtime_stats_result(get_runtime_stats_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success.size()); + for (RuntimeStat other_element : other.success) { + __this__success.add(new RuntimeStat(other_element)); + } + this.success = __this__success; + } if (other.isSetO1()) { this.o1 = new MetaException(other.o1); } } - public add_runtime_stats_result deepCopy() { - return new add_runtime_stats_result(this); + public get_runtime_stats_result deepCopy() { + return new get_runtime_stats_result(this); } @Override public void clear() { + this.success = null; this.o1 = null; } + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(RuntimeStat elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + public MetaException getO1() { return this.o1; } @@ -244980,6 +245904,14 @@ public void setO1IsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + case O1: if (value == null) { unsetO1(); @@ -244993,6 +245925,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + case O1: return getO1(); @@ -245007,6 +245942,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); } @@ -245017,15 +245954,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof add_runtime_stats_result) - return this.equals((add_runtime_stats_result)that); + if (that instanceof get_runtime_stats_result) + return this.equals((get_runtime_stats_result)that); return false; } - public boolean equals(add_runtime_stats_result that) { + public boolean equals(get_runtime_stats_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -245042,6 +245988,11 @@ public boolean equals(add_runtime_stats_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -245051,13 +246002,23 @@ public int hashCode() { } @Override - public int compareTo(add_runtime_stats_result other) { + public int compareTo(get_runtime_stats_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -245085,9 +246046,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("add_runtime_stats_result("); + StringBuilder sb = new StringBuilder("get_runtime_stats_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -245120,15 +246089,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_runtime_stats_resultStandardSchemeFactory implements SchemeFactory { - public add_runtime_stats_resultStandardScheme getScheme() { - return new add_runtime_stats_resultStandardScheme(); + private static class get_runtime_stats_resultStandardSchemeFactory implements SchemeFactory { + public get_runtime_stats_resultStandardScheme getScheme() { + return new get_runtime_stats_resultStandardScheme(); } } - private static class add_runtime_stats_resultStandardScheme extends StandardScheme { + private static class get_runtime_stats_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -245138,6 +246107,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_r break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1564 = iprot.readListBegin(); + struct.success = new ArrayList(_list1564.size); + RuntimeStat _elem1565; + for (int _i1566 = 0; _i1566 < _list1564.size; ++_i1566) + { + _elem1565 = new RuntimeStat(); + _elem1565.read(iprot); + struct.success.add(_elem1565); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new MetaException(); @@ -245156,10 +246144,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_runtime_stats_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_runtime_stats_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (RuntimeStat _iter1567 : struct.success) + { + _iter1567.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -245171,32 +246171,58 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_runtime_stats_ } - private static class add_runtime_stats_resultTupleSchemeFactory implements SchemeFactory { - public add_runtime_stats_resultTupleScheme getScheme() { - return new add_runtime_stats_resultTupleScheme(); + private static class 
get_runtime_stats_resultTupleSchemeFactory implements SchemeFactory { + public get_runtime_stats_resultTupleScheme getScheme() { + return new get_runtime_stats_resultTupleScheme(); } } - private static class add_runtime_stats_resultTupleScheme extends TupleScheme { + private static class get_runtime_stats_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO1()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (RuntimeStat _iter1568 : struct.success) + { + _iter1568.write(oprot); + } + } + } if (struct.isSetO1()) { struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1569.size); + RuntimeStat _elem1570; + for (int _i1571 = 0; _i1571 < _list1569.size; ++_i1571) + { + _elem1570 = new RuntimeStat(); + _elem1570.read(iprot); + struct.success.add(_elem1570); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); @@ -245206,22 +246232,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_re } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_runtime_stats_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_runtime_stats_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_with_specs_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_with_specs_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_runtime_stats_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
get_runtime_stats_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_with_specs_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_with_specs_argsTupleSchemeFactory()); } - private GetRuntimeStatsRequest rqst; // required + private GetPartitionsRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQUEST((short)1, "request"); private static final Map byName = new HashMap(); @@ -245236,8 +246262,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_runtime_stats_re */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQUEST + return REQUEST; default: return null; } @@ -245281,70 +246307,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetRuntimeStatsRequest.class))); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetPartitionsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_runtime_stats_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_with_specs_args.class, metaDataMap); } - public get_runtime_stats_args() { + public get_partitions_with_specs_args() { } - public get_runtime_stats_args( - GetRuntimeStatsRequest rqst) + public get_partitions_with_specs_args( + GetPartitionsRequest request) { this(); - this.rqst = rqst; + this.request = request; } /** * Performs a deep copy on other. 
*/ - public get_runtime_stats_args(get_runtime_stats_args other) { - if (other.isSetRqst()) { - this.rqst = new GetRuntimeStatsRequest(other.rqst); + public get_partitions_with_specs_args(get_partitions_with_specs_args other) { + if (other.isSetRequest()) { + this.request = new GetPartitionsRequest(other.request); } } - public get_runtime_stats_args deepCopy() { - return new get_runtime_stats_args(this); + public get_partitions_with_specs_args deepCopy() { + return new get_partitions_with_specs_args(this); } @Override public void clear() { - this.rqst = null; + this.request = null; } - public GetRuntimeStatsRequest getRqst() { - return this.rqst; + public GetPartitionsRequest getRequest() { + return this.request; } - public void setRqst(GetRuntimeStatsRequest rqst) { - this.rqst = rqst; + public void setRequest(GetPartitionsRequest request) { + this.request = request; } - public void unsetRqst() { - this.rqst = null; + public void unsetRequest() { + this.request = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; } - public void setRqstIsSet(boolean value) { + public void setRequestIsSet(boolean value) { if (!value) { - this.rqst = null; + this.request = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQUEST: if (value == null) { - unsetRqst(); + unsetRequest(); } else { - setRqst((GetRuntimeStatsRequest)value); + setRequest((GetPartitionsRequest)value); } break; @@ -245353,8 +246379,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case REQUEST: + return getRequest(); } throw new IllegalStateException(); @@ -245367,8 +246393,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQUEST: + return isSetRequest(); } throw new IllegalStateException(); } @@ -245377,21 +246403,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_runtime_stats_args) - return this.equals((get_runtime_stats_args)that); + if (that instanceof get_partitions_with_specs_args) + return this.equals((get_partitions_with_specs_args)that); return false; } - public boolean equals(get_runtime_stats_args that) { + public boolean equals(get_partitions_with_specs_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.request.equals(that.request)) return false; } @@ -245402,28 +246428,28 @@ public boolean equals(get_runtime_stats_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_request = true && (isSetRequest()); + list.add(present_request); 
+ if (present_request) + list.add(request); return list.hashCode(); } @Override - public int compareTo(get_runtime_stats_args other) { + public int compareTo(get_partitions_with_specs_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); if (lastComparison != 0) { return lastComparison; } @@ -245445,14 +246471,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_runtime_stats_args("); + StringBuilder sb = new StringBuilder("get_partitions_with_specs_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { + sb.append("request:"); + if (this.request == null) { sb.append("null"); } else { - sb.append(this.rqst); + sb.append(this.request); } first = false; sb.append(")"); @@ -245462,8 +246488,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); + if (request != null) { + request.validate(); } } @@ -245483,15 +246509,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_runtime_stats_argsStandardSchemeFactory implements SchemeFactory { - public get_runtime_stats_argsStandardScheme getScheme() { - return new get_runtime_stats_argsStandardScheme(); + private static class get_partitions_with_specs_argsStandardSchemeFactory implements SchemeFactory { + public get_partitions_with_specs_argsStandardScheme getScheme() { + return new get_partitions_with_specs_argsStandardScheme(); } } - private static class get_runtime_stats_argsStandardScheme extends StandardScheme { + private static class get_partitions_with_specs_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with_specs_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -245501,11 +246527,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_a break; } switch (schemeField.id) { - case 1: // RQST + case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new GetRuntimeStatsRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.request = new GetPartitionsRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -245519,13 +246545,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_args struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_with_specs_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -245534,54 +246560,54 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ } - private static class get_runtime_stats_argsTupleSchemeFactory implements SchemeFactory { - public get_runtime_stats_argsTupleScheme getScheme() { - return new get_runtime_stats_argsTupleScheme(); + private static class get_partitions_with_specs_argsTupleSchemeFactory implements SchemeFactory { + public get_partitions_with_specs_argsTupleScheme getScheme() { + return new get_partitions_with_specs_argsTupleScheme(); } } - private static class get_runtime_stats_argsTupleScheme extends TupleScheme { + private static class get_partitions_with_specs_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_specs_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { + if (struct.isSetRequest()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); + if (struct.isSetRequest()) { + struct.request.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_specs_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new GetRuntimeStatsRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.request = new GetPartitionsRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_runtime_stats_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_runtime_stats_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_with_specs_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_with_specs_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField 
O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_runtime_stats_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_runtime_stats_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_with_specs_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_with_specs_resultTupleSchemeFactory()); } - private List success; // required + private GetPartitionsResponse success; // required private MetaException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -245650,19 +246676,18 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RuntimeStat.class)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetPartitionsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_runtime_stats_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_with_specs_result.class, metaDataMap); } - public get_runtime_stats_result() { + public get_partitions_with_specs_result() { } - public get_runtime_stats_result( - List success, + public get_partitions_with_specs_result( + GetPartitionsResponse success, MetaException o1) { this(); @@ -245673,21 +246698,17 @@ public get_runtime_stats_result( /** * Performs a deep copy on other. */ - public get_runtime_stats_result(get_runtime_stats_result other) { + public get_partitions_with_specs_result(get_partitions_with_specs_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (RuntimeStat other_element : other.success) { - __this__success.add(new RuntimeStat(other_element)); - } - this.success = __this__success; + this.success = new GetPartitionsResponse(other.success); } if (other.isSetO1()) { this.o1 = new MetaException(other.o1); } } - public get_runtime_stats_result deepCopy() { - return new get_runtime_stats_result(this); + public get_partitions_with_specs_result deepCopy() { + return new get_partitions_with_specs_result(this); } @Override @@ -245696,26 +246717,11 @@ public void clear() { this.o1 = null; } - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? 
null : this.success.iterator(); - } - - public void addToSuccess(RuntimeStat elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { + public GetPartitionsResponse getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(GetPartitionsResponse success) { this.success = success; } @@ -245763,7 +246769,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((GetPartitionsResponse)value); } break; @@ -245809,12 +246815,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_runtime_stats_result) - return this.equals((get_runtime_stats_result)that); + if (that instanceof get_partitions_with_specs_result) + return this.equals((get_partitions_with_specs_result)that); return false; } - public boolean equals(get_runtime_stats_result that) { + public boolean equals(get_partitions_with_specs_result that) { if (that == null) return false; @@ -245857,7 +246863,7 @@ public int hashCode() { } @Override - public int compareTo(get_runtime_stats_result other) { + public int compareTo(get_partitions_with_specs_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -245901,7 +246907,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_runtime_stats_result("); + StringBuilder sb = new StringBuilder("get_partitions_with_specs_result("); boolean first = true; sb.append("success:"); @@ -245926,6 +246932,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -245944,15 +246953,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_runtime_stats_resultStandardSchemeFactory implements SchemeFactory { - public get_runtime_stats_resultStandardScheme getScheme() { - return new get_runtime_stats_resultStandardScheme(); + private static class get_partitions_with_specs_resultStandardSchemeFactory implements SchemeFactory { + public get_partitions_with_specs_resultStandardScheme getScheme() { + return new get_partitions_with_specs_resultStandardScheme(); } } - private static class get_runtime_stats_resultStandardScheme extends StandardScheme { + private static class get_partitions_with_specs_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with_specs_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -245963,19 +246972,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list1532 = iprot.readListBegin(); - struct.success = new ArrayList(_list1532.size); - 
RuntimeStat _elem1533; - for (int _i1534 = 0; _i1534 < _list1532.size; ++_i1534) - { - _elem1533 = new RuntimeStat(); - _elem1533.read(iprot); - struct.success.add(_elem1533); - } - iprot.readListEnd(); - } + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetPartitionsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -245999,20 +246998,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_with_specs_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter1535 : struct.success) - { - _iter1535.write(oprot); - } - oprot.writeListEnd(); - } + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -246026,16 +247018,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ } - private static class get_runtime_stats_resultTupleSchemeFactory implements SchemeFactory { - public get_runtime_stats_resultTupleScheme getScheme() { - return new get_runtime_stats_resultTupleScheme(); + private static class get_partitions_with_specs_resultTupleSchemeFactory implements SchemeFactory { + public get_partitions_with_specs_resultTupleScheme getScheme() { + return new get_partitions_with_specs_resultTupleScheme(); } } - private static class get_runtime_stats_resultTupleScheme extends TupleScheme { + private static class get_partitions_with_specs_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_specs_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -246046,13 +247038,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r } oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter1536 : struct.success) - { - _iter1536.write(oprot); - } - } + struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -246060,21 +247046,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_specs_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list1537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1537.size); - RuntimeStat _elem1538; - for (int 
_i1539 = 0; _i1539 < _list1537.size; ++_i1539) - { - _elem1538 = new RuntimeStat(); - _elem1538.read(iprot); - struct.success.add(_elem1538); - } - } + struct.success = new GetPartitionsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java index 1acf6f4b4ed9d4e6bce4010b534dccb52ecd6f3b..8f23274867fa1f055efc4d2ba393dae1a4cd7397 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe case 1: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list344 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list344.size); - SQLUniqueConstraint _elem345; - for (int _i346 = 0; _i346 < _list344.size; ++_i346) + org.apache.thrift.protocol.TList _list376 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list376.size); + SQLUniqueConstraint _elem377; + for (int _i378 = 0; _i378 < _list376.size; ++_i378) { - _elem345 = new SQLUniqueConstraint(); - _elem345.read(iprot); - struct.uniqueConstraints.add(_elem345); + _elem377 = new SQLUniqueConstraint(); + _elem377.read(iprot); + struct.uniqueConstraints.add(_elem377); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, UniqueConstraintsR oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter347 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter379 : struct.uniqueConstraints) { - _iter347.write(oprot); + _iter379.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter348 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter380 : struct.uniqueConstraints) { - _iter348.write(oprot); + _iter380.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe public void read(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list349.size); - SQLUniqueConstraint _elem350; - for (int _i351 = 0; _i351 < _list349.size; ++_i351) + org.apache.thrift.protocol.TList _list381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list381.size); + SQLUniqueConstraint _elem382; + for (int _i383 = 0; _i383 < _list381.size; ++_i383) { - _elem350 = 
new SQLUniqueConstraint(); - _elem350.read(iprot); - struct.uniqueConstraints.add(_elem350); + _elem382 = new SQLUniqueConstraint(); + _elem382.read(iprot); + struct.uniqueConstraints.add(_elem382); } } struct.setUniqueConstraintsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index 44674798f718ac121c6f523d28173ff892e1d874..4b38aebaf72c9e5d991210873e13276fc8aab8a3 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.pools = new ArrayList(_list872.size); - WMPool _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); + struct.pools = new ArrayList(_list904.size); + WMPool _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem873 = new WMPool(); - _elem873.read(iprot); - struct.pools.add(_elem873); + _elem905 = new WMPool(); + _elem905.read(iprot); + struct.pools.add(_elem905); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list875 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list875.size); - WMMapping _elem876; - for (int _i877 = 0; _i877 < _list875.size; ++_i877) + org.apache.thrift.protocol.TList _list907 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list907.size); + WMMapping _elem908; + for (int _i909 = 0; _i909 < _list907.size; ++_i909) { - _elem876 = new WMMapping(); - _elem876.read(iprot); - struct.mappings.add(_elem876); + _elem908 = new WMMapping(); + _elem908.read(iprot); + struct.mappings.add(_elem908); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list878 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list878.size); - WMTrigger _elem879; - for (int _i880 = 0; _i880 < _list878.size; ++_i880) + org.apache.thrift.protocol.TList _list910 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list910.size); + WMTrigger _elem911; + for (int _i912 = 0; _i912 < _list910.size; ++_i912) { - _elem879 = new WMTrigger(); - _elem879.read(iprot); - struct.triggers.add(_elem879); + _elem911 = new WMTrigger(); + _elem911.read(iprot); + struct.triggers.add(_elem911); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list881 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list881.size); - WMPoolTrigger _elem882; - for (int _i883 = 0; _i883 < 
_list881.size; ++_i883) + org.apache.thrift.protocol.TList _list913 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list913.size); + WMPoolTrigger _elem914; + for (int _i915 = 0; _i915 < _list913.size; ++_i915) { - _elem882 = new WMPoolTrigger(); - _elem882.read(iprot); - struct.poolTriggers.add(_elem882); + _elem914 = new WMPoolTrigger(); + _elem914.read(iprot); + struct.poolTriggers.add(_elem914); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter884 : struct.pools) + for (WMPool _iter916 : struct.pools) { - _iter884.write(oprot); + _iter916.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter885 : struct.mappings) + for (WMMapping _iter917 : struct.mappings) { - _iter885.write(oprot); + _iter917.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter886 : struct.triggers) + for (WMTrigger _iter918 : struct.triggers) { - _iter886.write(oprot); + _iter918.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter887 : struct.poolTriggers) + for (WMPoolTrigger _iter919 : struct.poolTriggers) { - _iter887.write(oprot); + _iter919.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter888 : struct.pools) + for (WMPool _iter920 : struct.pools) { - _iter888.write(oprot); + _iter920.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter889 : struct.mappings) + for (WMMapping _iter921 : struct.mappings) { - _iter889.write(oprot); + _iter921.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter890 : struct.triggers) + for (WMTrigger _iter922 : struct.triggers) { - _iter890.write(oprot); + _iter922.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter891 : struct.poolTriggers) + for (WMPoolTrigger _iter923 : struct.poolTriggers) { - _iter891.write(oprot); + _iter923.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list892 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list892.size); - WMPool _elem893; - for (int _i894 = 0; _i894 < _list892.size; ++_i894) + org.apache.thrift.protocol.TList _list924 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list924.size); + WMPool _elem925; + for (int _i926 = 0; _i926 < _list924.size; ++_i926) { - _elem893 = new WMPool(); - _elem893.read(iprot); - struct.pools.add(_elem893); + _elem925 = new WMPool(); + _elem925.read(iprot); + struct.pools.add(_elem925); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list895 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list895.size); - WMMapping _elem896; - for (int _i897 = 0; _i897 < _list895.size; ++_i897) + org.apache.thrift.protocol.TList _list927 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list927.size); + WMMapping _elem928; + for (int _i929 = 0; _i929 < _list927.size; ++_i929) { - _elem896 = new WMMapping(); - _elem896.read(iprot); - struct.mappings.add(_elem896); + _elem928 = new WMMapping(); + _elem928.read(iprot); + struct.mappings.add(_elem928); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list898 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list898.size); - WMTrigger _elem899; - for (int _i900 = 0; _i900 < _list898.size; ++_i900) + org.apache.thrift.protocol.TList _list930 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list930.size); + WMTrigger _elem931; + for (int _i932 = 0; _i932 < _list930.size; ++_i932) { - _elem899 = new WMTrigger(); - _elem899.read(iprot); - struct.triggers.add(_elem899); + _elem931 = new WMTrigger(); + _elem931.read(iprot); + struct.triggers.add(_elem931); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list901.size); - WMPoolTrigger _elem902; - for (int _i903 = 0; _i903 < _list901.size; ++_i903) + org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list933.size); + WMPoolTrigger _elem934; + for (int _i935 = 0; _i935 < _list933.size; ++_i935) { - _elem902 = new WMPoolTrigger(); - _elem902.read(iprot); - struct.poolTriggers.add(_elem902); + _elem934 = new WMPoolTrigger(); + _elem934.read(iprot); + struct.poolTriggers.add(_elem934); } } struct.setPoolTriggersIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index c6cb845585363d15cd50a541a72548f0745ea80a..e97eab36dd820d69170020cbcbb840f4e28f3ce5 100644 --- 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list904.size); - WMResourcePlan _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list936.size); + WMResourcePlan _elem937; + for (int _i938 = 0; _i938 < _list936.size; ++_i938) { - _elem905 = new WMResourcePlan(); - _elem905.read(iprot); - struct.resourcePlans.add(_elem905); + _elem937 = new WMResourcePlan(); + _elem937.read(iprot); + struct.resourcePlans.add(_elem937); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter907 : struct.resourcePlans) + for (WMResourcePlan _iter939 : struct.resourcePlans) { - _iter907.write(oprot); + _iter939.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter908 : struct.resourcePlans) + for (WMResourcePlan _iter940 : struct.resourcePlans) { - _iter908.write(oprot); + _iter940.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list909.size); - WMResourcePlan _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list941.size); + WMResourcePlan _elem942; + for (int _i943 = 0; _i943 < _list941.size; ++_i943) { - _elem910 = new WMResourcePlan(); - _elem910.read(iprot); - struct.resourcePlans.add(_elem910); + _elem942 = new WMResourcePlan(); + _elem942.read(iprot); + struct.resourcePlans.add(_elem942); } } struct.setResourcePlansIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index 9eed335cda8561938ba68fbb8e9453c584735434..4a6633bce6c595bc69660cf2c0e55b9316a8a139 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ 
standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list928.size); - WMTrigger _elem929; - for (int _i930 = 0; _i930 < _list928.size; ++_i930) + org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list960.size); + WMTrigger _elem961; + for (int _i962 = 0; _i962 < _list960.size; ++_i962) { - _elem929 = new WMTrigger(); - _elem929.read(iprot); - struct.triggers.add(_elem929); + _elem961 = new WMTrigger(); + _elem961.read(iprot); + struct.triggers.add(_elem961); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter931 : struct.triggers) + for (WMTrigger _iter963 : struct.triggers) { - _iter931.write(oprot); + _iter963.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter932 : struct.triggers) + for (WMTrigger _iter964 : struct.triggers) { - _iter932.write(oprot); + _iter964.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list933.size); - WMTrigger _elem934; - for (int _i935 = 0; _i935 < _list933.size; ++_i935) + org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list965.size); + WMTrigger _elem966; + for (int _i967 = 0; _i967 < _list965.size; ++_i967) { - _elem934 = new WMTrigger(); - _elem934.read(iprot); - struct.triggers.add(_elem934); + _elem966 = new WMTrigger(); + _elem966.read(iprot); + struct.triggers.add(_elem966); } } struct.setTriggersIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index ee9251c86660b7f731ab7e20643dd6db7e351e3f..b01a517ec4cb52fcb98982f8a11faec1442b4fc0 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); - struct.errors = new 
ArrayList(_list912.size); - String _elem913; - for (int _i914 = 0; _i914 < _list912.size; ++_i914) + org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); + struct.errors = new ArrayList(_list944.size); + String _elem945; + for (int _i946 = 0; _i946 < _list944.size; ++_i946) { - _elem913 = iprot.readString(); - struct.errors.add(_elem913); + _elem945 = iprot.readString(); + struct.errors.add(_elem945); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list915 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list915.size); - String _elem916; - for (int _i917 = 0; _i917 < _list915.size; ++_i917) + org.apache.thrift.protocol.TList _list947 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list947.size); + String _elem948; + for (int _i949 = 0; _i949 < _list947.size; ++_i949) { - _elem916 = iprot.readString(); - struct.warnings.add(_elem916); + _elem948 = iprot.readString(); + struct.warnings.add(_elem948); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter918 : struct.errors) + for (String _iter950 : struct.errors) { - oprot.writeString(_iter918); + oprot.writeString(_iter950); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter919 : struct.warnings) + for (String _iter951 : struct.warnings) { - oprot.writeString(_iter919); + oprot.writeString(_iter951); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter920 : struct.errors) + for (String _iter952 : struct.errors) { - oprot.writeString(_iter920); + oprot.writeString(_iter952); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter921 : struct.warnings) + for (String _iter953 : struct.warnings) { - oprot.writeString(_iter921); + oprot.writeString(_iter953); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list922 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list922.size); - String _elem923; - for (int _i924 = 0; _i924 < _list922.size; ++_i924) + org.apache.thrift.protocol.TList _list954 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list954.size); + String _elem955; + for (int _i956 = 0; _i956 < _list954.size; ++_i956) { - _elem923 = iprot.readString(); - struct.errors.add(_elem923); + _elem955 = iprot.readString(); + struct.errors.add(_elem955); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list925 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list925.size); - String _elem926; - for (int _i927 = 0; _i927 < _list925.size; ++_i927) + org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list957.size); + String _elem958; + for (int _i959 = 0; _i959 < _list957.size; ++_i959) { - _elem926 = iprot.readString(); - struct.warnings.add(_elem926); + _elem958 = iprot.readString(); + struct.warnings.add(_elem958); } } struct.setWarningsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java index c7ef726f5481b37561f8144b796fdadfb4254452..500293cc7869d05ab75752a9cadbffae40ad6a7e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java +++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WriteNotificationLogRequest.java @@ -813,13 +813,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WriteNotificationLo case 6: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list772.size); - String _elem773; - for (int _i774 = 0; _i774 < _list772.size; ++_i774) + org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list804.size); + String _elem805; + for (int _i806 = 0; _i806 < _list804.size; ++_i806) { - _elem773 = iprot.readString(); - struct.partitionVals.add(_elem773); + _elem805 = iprot.readString(); + struct.partitionVals.add(_elem805); } iprot.readListEnd(); } @@ -867,9 +867,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WriteNotificationL oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter775 : struct.partitionVals) + for (String _iter807 : struct.partitionVals) { - oprot.writeString(_iter775); + oprot.writeString(_iter807); } oprot.writeListEnd(); } @@ -906,9 +906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLo if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter776 : struct.partitionVals) + for (String _iter808 : struct.partitionVals) { - oprot.writeString(_iter776); + oprot.writeString(_iter808); } } } @@ -931,13 +931,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WriteNotificationLog BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list777.size); - String _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list809.size); + String _elem810; + for (int _i811 = 
0; _i811 < _list809.size; ++_i811) { - _elem778 = iprot.readString(); - struct.partitionVals.add(_elem778); + _elem810 = iprot.readString(); + struct.partitionVals.add(_elem810); } } struct.setPartitionValsIsSet(true); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index cc19f2389e7b595722dcc1f3296877a02b20e0a4..5f07c148745e64f647312f22c012822fae4b9346 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1608,6 +1608,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\MetaException */ public function get_runtime_stats(\metastore\GetRuntimeStatsRequest $rqst); + /** + * @param \metastore\GetPartitionsRequest $request + * @return \metastore\GetPartitionsResponse + * @throws \metastore\MetaException + */ + public function get_partitions_with_specs(\metastore\GetPartitionsRequest $request); } class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -13755,6 +13761,60 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_runtime_stats failed: unknown result"); } + public function get_partitions_with_specs(\metastore\GetPartitionsRequest $request) + { + $this->send_get_partitions_with_specs($request); + return $this->recv_get_partitions_with_specs(); + } + + public function send_get_partitions_with_specs(\metastore\GetPartitionsRequest $request) + { + $args = new \metastore\ThriftHiveMetastore_get_partitions_with_specs_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_partitions_with_specs', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_partitions_with_specs', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_partitions_with_specs() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_partitions_with_specs_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_partitions_with_specs_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new \Exception("get_partitions_with_specs failed: unknown result"); + } + } // HELPER FUNCTIONS AND STRUCTURES @@ -15892,14 +15952,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size854 = 0; - $_etype857 = 0; - 
$xfer += $input->readListBegin($_etype857, $_size854); - for ($_i858 = 0; $_i858 < $_size854; ++$_i858) + $_size882 = 0; + $_etype885 = 0; + $xfer += $input->readListBegin($_etype885, $_size882); + for ($_i886 = 0; $_i886 < $_size882; ++$_i886) { - $elem859 = null; - $xfer += $input->readString($elem859); - $this->success []= $elem859; + $elem887 = null; + $xfer += $input->readString($elem887); + $this->success []= $elem887; } $xfer += $input->readListEnd(); } else { @@ -15935,9 +15995,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter860) + foreach ($this->success as $iter888) { - $xfer += $output->writeString($iter860); + $xfer += $output->writeString($iter888); } } $output->writeListEnd(); @@ -16068,14 +16128,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size861 = 0; - $_etype864 = 0; - $xfer += $input->readListBegin($_etype864, $_size861); - for ($_i865 = 0; $_i865 < $_size861; ++$_i865) + $_size889 = 0; + $_etype892 = 0; + $xfer += $input->readListBegin($_etype892, $_size889); + for ($_i893 = 0; $_i893 < $_size889; ++$_i893) { - $elem866 = null; - $xfer += $input->readString($elem866); - $this->success []= $elem866; + $elem894 = null; + $xfer += $input->readString($elem894); + $this->success []= $elem894; } $xfer += $input->readListEnd(); } else { @@ -16111,9 +16171,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter867) + foreach ($this->success as $iter895) { - $xfer += $output->writeString($iter867); + $xfer += $output->writeString($iter895); } } $output->writeListEnd(); @@ -17114,18 +17174,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size868 = 0; - $_ktype869 = 0; - $_vtype870 = 0; - $xfer += $input->readMapBegin($_ktype869, $_vtype870, $_size868); - for ($_i872 = 0; $_i872 < $_size868; ++$_i872) + $_size896 = 0; + $_ktype897 = 0; + $_vtype898 = 0; + $xfer += $input->readMapBegin($_ktype897, $_vtype898, $_size896); + for ($_i900 = 0; $_i900 < $_size896; ++$_i900) { - $key873 = ''; - $val874 = new \metastore\Type(); - $xfer += $input->readString($key873); - $val874 = new \metastore\Type(); - $xfer += $val874->read($input); - $this->success[$key873] = $val874; + $key901 = ''; + $val902 = new \metastore\Type(); + $xfer += $input->readString($key901); + $val902 = new \metastore\Type(); + $xfer += $val902->read($input); + $this->success[$key901] = $val902; } $xfer += $input->readMapEnd(); } else { @@ -17161,10 +17221,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter875 => $viter876) + foreach ($this->success as $kiter903 => $viter904) { - $xfer += $output->writeString($kiter875); - $xfer += $viter876->write($output); + $xfer += $output->writeString($kiter903); + $xfer += $viter904->write($output); } } $output->writeMapEnd(); @@ -17368,15 +17428,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size877 = 0; - $_etype880 = 0; - $xfer += $input->readListBegin($_etype880, $_size877); - for ($_i881 = 0; $_i881 < $_size877; ++$_i881) + $_size905 = 0; + $_etype908 = 0; + $xfer += $input->readListBegin($_etype908, $_size905); + for ($_i909 = 0; 
$_i909 < $_size905; ++$_i909) { - $elem882 = null; - $elem882 = new \metastore\FieldSchema(); - $xfer += $elem882->read($input); - $this->success []= $elem882; + $elem910 = null; + $elem910 = new \metastore\FieldSchema(); + $xfer += $elem910->read($input); + $this->success []= $elem910; } $xfer += $input->readListEnd(); } else { @@ -17428,9 +17488,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter883) + foreach ($this->success as $iter911) { - $xfer += $iter883->write($output); + $xfer += $iter911->write($output); } } $output->writeListEnd(); @@ -17672,15 +17732,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size884 = 0; - $_etype887 = 0; - $xfer += $input->readListBegin($_etype887, $_size884); - for ($_i888 = 0; $_i888 < $_size884; ++$_i888) + $_size912 = 0; + $_etype915 = 0; + $xfer += $input->readListBegin($_etype915, $_size912); + for ($_i916 = 0; $_i916 < $_size912; ++$_i916) { - $elem889 = null; - $elem889 = new \metastore\FieldSchema(); - $xfer += $elem889->read($input); - $this->success []= $elem889; + $elem917 = null; + $elem917 = new \metastore\FieldSchema(); + $xfer += $elem917->read($input); + $this->success []= $elem917; } $xfer += $input->readListEnd(); } else { @@ -17732,9 +17792,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter890) + foreach ($this->success as $iter918) { - $xfer += $iter890->write($output); + $xfer += $iter918->write($output); } } $output->writeListEnd(); @@ -17948,15 +18008,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size891 = 0; - $_etype894 = 0; - $xfer += $input->readListBegin($_etype894, $_size891); - for ($_i895 = 0; $_i895 < $_size891; ++$_i895) + $_size919 = 0; + $_etype922 = 0; + $xfer += $input->readListBegin($_etype922, $_size919); + for ($_i923 = 0; $_i923 < $_size919; ++$_i923) { - $elem896 = null; - $elem896 = new \metastore\FieldSchema(); - $xfer += $elem896->read($input); - $this->success []= $elem896; + $elem924 = null; + $elem924 = new \metastore\FieldSchema(); + $xfer += $elem924->read($input); + $this->success []= $elem924; } $xfer += $input->readListEnd(); } else { @@ -18008,9 +18068,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter897) + foreach ($this->success as $iter925) { - $xfer += $iter897->write($output); + $xfer += $iter925->write($output); } } $output->writeListEnd(); @@ -18252,15 +18312,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size898 = 0; - $_etype901 = 0; - $xfer += $input->readListBegin($_etype901, $_size898); - for ($_i902 = 0; $_i902 < $_size898; ++$_i902) + $_size926 = 0; + $_etype929 = 0; + $xfer += $input->readListBegin($_etype929, $_size926); + for ($_i930 = 0; $_i930 < $_size926; ++$_i930) { - $elem903 = null; - $elem903 = new \metastore\FieldSchema(); - $xfer += $elem903->read($input); - $this->success []= $elem903; + $elem931 = null; + $elem931 = new \metastore\FieldSchema(); + $xfer += $elem931->read($input); + $this->success []= $elem931; } $xfer += $input->readListEnd(); } else { @@ -18312,9 +18372,9 @@ class 
ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter904) + foreach ($this->success as $iter932) { - $xfer += $iter904->write($output); + $xfer += $iter932->write($output); } } $output->writeListEnd(); @@ -18986,15 +19046,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size905 = 0; - $_etype908 = 0; - $xfer += $input->readListBegin($_etype908, $_size905); - for ($_i909 = 0; $_i909 < $_size905; ++$_i909) + $_size933 = 0; + $_etype936 = 0; + $xfer += $input->readListBegin($_etype936, $_size933); + for ($_i937 = 0; $_i937 < $_size933; ++$_i937) { - $elem910 = null; - $elem910 = new \metastore\SQLPrimaryKey(); - $xfer += $elem910->read($input); - $this->primaryKeys []= $elem910; + $elem938 = null; + $elem938 = new \metastore\SQLPrimaryKey(); + $xfer += $elem938->read($input); + $this->primaryKeys []= $elem938; } $xfer += $input->readListEnd(); } else { @@ -19004,15 +19064,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size911 = 0; - $_etype914 = 0; - $xfer += $input->readListBegin($_etype914, $_size911); - for ($_i915 = 0; $_i915 < $_size911; ++$_i915) + $_size939 = 0; + $_etype942 = 0; + $xfer += $input->readListBegin($_etype942, $_size939); + for ($_i943 = 0; $_i943 < $_size939; ++$_i943) { - $elem916 = null; - $elem916 = new \metastore\SQLForeignKey(); - $xfer += $elem916->read($input); - $this->foreignKeys []= $elem916; + $elem944 = null; + $elem944 = new \metastore\SQLForeignKey(); + $xfer += $elem944->read($input); + $this->foreignKeys []= $elem944; } $xfer += $input->readListEnd(); } else { @@ -19022,15 +19082,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size917 = 0; - $_etype920 = 0; - $xfer += $input->readListBegin($_etype920, $_size917); - for ($_i921 = 0; $_i921 < $_size917; ++$_i921) + $_size945 = 0; + $_etype948 = 0; + $xfer += $input->readListBegin($_etype948, $_size945); + for ($_i949 = 0; $_i949 < $_size945; ++$_i949) { - $elem922 = null; - $elem922 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem922->read($input); - $this->uniqueConstraints []= $elem922; + $elem950 = null; + $elem950 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem950->read($input); + $this->uniqueConstraints []= $elem950; } $xfer += $input->readListEnd(); } else { @@ -19040,15 +19100,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size923 = 0; - $_etype926 = 0; - $xfer += $input->readListBegin($_etype926, $_size923); - for ($_i927 = 0; $_i927 < $_size923; ++$_i927) + $_size951 = 0; + $_etype954 = 0; + $xfer += $input->readListBegin($_etype954, $_size951); + for ($_i955 = 0; $_i955 < $_size951; ++$_i955) { - $elem928 = null; - $elem928 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem928->read($input); - $this->notNullConstraints []= $elem928; + $elem956 = null; + $elem956 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem956->read($input); + $this->notNullConstraints []= $elem956; } $xfer += $input->readListEnd(); } else { @@ -19058,15 +19118,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size929 
= 0; - $_etype932 = 0; - $xfer += $input->readListBegin($_etype932, $_size929); - for ($_i933 = 0; $_i933 < $_size929; ++$_i933) + $_size957 = 0; + $_etype960 = 0; + $xfer += $input->readListBegin($_etype960, $_size957); + for ($_i961 = 0; $_i961 < $_size957; ++$_i961) { - $elem934 = null; - $elem934 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem934->read($input); - $this->defaultConstraints []= $elem934; + $elem962 = null; + $elem962 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem962->read($input); + $this->defaultConstraints []= $elem962; } $xfer += $input->readListEnd(); } else { @@ -19076,15 +19136,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size935 = 0; - $_etype938 = 0; - $xfer += $input->readListBegin($_etype938, $_size935); - for ($_i939 = 0; $_i939 < $_size935; ++$_i939) + $_size963 = 0; + $_etype966 = 0; + $xfer += $input->readListBegin($_etype966, $_size963); + for ($_i967 = 0; $_i967 < $_size963; ++$_i967) { - $elem940 = null; - $elem940 = new \metastore\SQLCheckConstraint(); - $xfer += $elem940->read($input); - $this->checkConstraints []= $elem940; + $elem968 = null; + $elem968 = new \metastore\SQLCheckConstraint(); + $xfer += $elem968->read($input); + $this->checkConstraints []= $elem968; } $xfer += $input->readListEnd(); } else { @@ -19120,9 +19180,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter941) + foreach ($this->primaryKeys as $iter969) { - $xfer += $iter941->write($output); + $xfer += $iter969->write($output); } } $output->writeListEnd(); @@ -19137,9 +19197,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter942) + foreach ($this->foreignKeys as $iter970) { - $xfer += $iter942->write($output); + $xfer += $iter970->write($output); } } $output->writeListEnd(); @@ -19154,9 +19214,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter943) + foreach ($this->uniqueConstraints as $iter971) { - $xfer += $iter943->write($output); + $xfer += $iter971->write($output); } } $output->writeListEnd(); @@ -19171,9 +19231,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter944) + foreach ($this->notNullConstraints as $iter972) { - $xfer += $iter944->write($output); + $xfer += $iter972->write($output); } } $output->writeListEnd(); @@ -19188,9 +19248,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter945) + foreach ($this->defaultConstraints as $iter973) { - $xfer += $iter945->write($output); + $xfer += $iter973->write($output); } } $output->writeListEnd(); @@ -19205,9 +19265,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter946) + foreach ($this->checkConstraints as $iter974) { - $xfer += $iter946->write($output); + $xfer += $iter974->write($output); } } $output->writeListEnd(); 
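For context on what these generated bindings expose: the endpoint added by this patch is get_partitions_with_specs(GetPartitionsRequest), which returns a GetPartitionsResponse and may throw MetaException (see the get_partitions_with_specs_result struct and the PHP client stubs above). The sketch below shows how a Java caller could invoke it through the generated ThriftHiveMetastore.Client. It is a minimal illustration, not part of this patch: the metastore host/port, the table identifiers, and the GetPartitionsRequest setters used to populate the request are assumptions based on the usual Thrift bean conventions.

    import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class GetPartitionsWithSpecsSketch {
      public static void main(String[] args) throws Exception {
        // Assumed metastore endpoint; 9083 is the conventional metastore Thrift port.
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        // Identify the table whose partitions should be fetched. The setter names
        // are assumed from standard Thrift-generated bean accessors; they are not
        // shown in the hunks of this diff.
        GetPartitionsRequest request = new GetPartitionsRequest();
        request.setDbName("default");
        request.setTblName("web_logs");

        // The generated client returns the value wrapped by
        // get_partitions_with_specs_result above: a single GetPartitionsResponse
        // rather than a list of full Partition objects.
        GetPartitionsResponse response = client.get_partitions_with_specs(request);
        System.out.println(response);

        transport.close();
      }
    }

The point of the new call, relative to the existing get_partitions-style endpoints, is that the response carries only the projected partition fields requested by the client instead of fully materialized Partition objects.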
@@ -21207,14 +21267,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size947 = 0; - $_etype950 = 0; - $xfer += $input->readListBegin($_etype950, $_size947); - for ($_i951 = 0; $_i951 < $_size947; ++$_i951) + $_size975 = 0; + $_etype978 = 0; + $xfer += $input->readListBegin($_etype978, $_size975); + for ($_i979 = 0; $_i979 < $_size975; ++$_i979) { - $elem952 = null; - $xfer += $input->readString($elem952); - $this->partNames []= $elem952; + $elem980 = null; + $xfer += $input->readString($elem980); + $this->partNames []= $elem980; } $xfer += $input->readListEnd(); } else { @@ -21252,9 +21312,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter953) + foreach ($this->partNames as $iter981) { - $xfer += $output->writeString($iter953); + $xfer += $output->writeString($iter981); } } $output->writeListEnd(); @@ -21690,14 +21750,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size954 = 0; - $_etype957 = 0; - $xfer += $input->readListBegin($_etype957, $_size954); - for ($_i958 = 0; $_i958 < $_size954; ++$_i958) + $_size982 = 0; + $_etype985 = 0; + $xfer += $input->readListBegin($_etype985, $_size982); + for ($_i986 = 0; $_i986 < $_size982; ++$_i986) { - $elem959 = null; - $xfer += $input->readString($elem959); - $this->success []= $elem959; + $elem987 = null; + $xfer += $input->readString($elem987); + $this->success []= $elem987; } $xfer += $input->readListEnd(); } else { @@ -21733,9 +21793,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter960) + foreach ($this->success as $iter988) { - $xfer += $output->writeString($iter960); + $xfer += $output->writeString($iter988); } } $output->writeListEnd(); @@ -21937,14 +21997,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size961 = 0; - $_etype964 = 0; - $xfer += $input->readListBegin($_etype964, $_size961); - for ($_i965 = 0; $_i965 < $_size961; ++$_i965) + $_size989 = 0; + $_etype992 = 0; + $xfer += $input->readListBegin($_etype992, $_size989); + for ($_i993 = 0; $_i993 < $_size989; ++$_i993) { - $elem966 = null; - $xfer += $input->readString($elem966); - $this->success []= $elem966; + $elem994 = null; + $xfer += $input->readString($elem994); + $this->success []= $elem994; } $xfer += $input->readListEnd(); } else { @@ -21980,9 +22040,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter967) + foreach ($this->success as $iter995) { - $xfer += $output->writeString($iter967); + $xfer += $output->writeString($iter995); } } $output->writeListEnd(); @@ -22138,14 +22198,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size968 = 0; - $_etype971 = 0; - $xfer += $input->readListBegin($_etype971, $_size968); - for ($_i972 = 0; $_i972 < $_size968; ++$_i972) + $_size996 = 0; + $_etype999 = 0; + $xfer += $input->readListBegin($_etype999, $_size996); + for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000) { - $elem973 = null; - $xfer += $input->readString($elem973); - $this->success []= $elem973; + $elem1001 = null; + $xfer += 
$input->readString($elem1001); + $this->success []= $elem1001; } $xfer += $input->readListEnd(); } else { @@ -22181,9 +22241,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter974) + foreach ($this->success as $iter1002) { - $xfer += $output->writeString($iter974); + $xfer += $output->writeString($iter1002); } } $output->writeListEnd(); @@ -22288,14 +22348,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size975 = 0; - $_etype978 = 0; - $xfer += $input->readListBegin($_etype978, $_size975); - for ($_i979 = 0; $_i979 < $_size975; ++$_i979) + $_size1003 = 0; + $_etype1006 = 0; + $xfer += $input->readListBegin($_etype1006, $_size1003); + for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007) { - $elem980 = null; - $xfer += $input->readString($elem980); - $this->tbl_types []= $elem980; + $elem1008 = null; + $xfer += $input->readString($elem1008); + $this->tbl_types []= $elem1008; } $xfer += $input->readListEnd(); } else { @@ -22333,9 +22393,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter981) + foreach ($this->tbl_types as $iter1009) { - $xfer += $output->writeString($iter981); + $xfer += $output->writeString($iter1009); } } $output->writeListEnd(); @@ -22412,15 +22472,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size982 = 0; - $_etype985 = 0; - $xfer += $input->readListBegin($_etype985, $_size982); - for ($_i986 = 0; $_i986 < $_size982; ++$_i986) + $_size1010 = 0; + $_etype1013 = 0; + $xfer += $input->readListBegin($_etype1013, $_size1010); + for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014) { - $elem987 = null; - $elem987 = new \metastore\TableMeta(); - $xfer += $elem987->read($input); - $this->success []= $elem987; + $elem1015 = null; + $elem1015 = new \metastore\TableMeta(); + $xfer += $elem1015->read($input); + $this->success []= $elem1015; } $xfer += $input->readListEnd(); } else { @@ -22456,9 +22516,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter988) + foreach ($this->success as $iter1016) { - $xfer += $iter988->write($output); + $xfer += $iter1016->write($output); } } $output->writeListEnd(); @@ -22614,14 +22674,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size989 = 0; - $_etype992 = 0; - $xfer += $input->readListBegin($_etype992, $_size989); - for ($_i993 = 0; $_i993 < $_size989; ++$_i993) + $_size1017 = 0; + $_etype1020 = 0; + $xfer += $input->readListBegin($_etype1020, $_size1017); + for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021) { - $elem994 = null; - $xfer += $input->readString($elem994); - $this->success []= $elem994; + $elem1022 = null; + $xfer += $input->readString($elem1022); + $this->success []= $elem1022; } $xfer += $input->readListEnd(); } else { @@ -22657,9 +22717,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter995) + foreach ($this->success as $iter1023) { - $xfer += $output->writeString($iter995); + $xfer += $output->writeString($iter1023); } } $output->writeListEnd(); @@ -22974,14 +23034,14 @@ class 
ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size996 = 0; - $_etype999 = 0; - $xfer += $input->readListBegin($_etype999, $_size996); - for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000) + $_size1024 = 0; + $_etype1027 = 0; + $xfer += $input->readListBegin($_etype1027, $_size1024); + for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028) { - $elem1001 = null; - $xfer += $input->readString($elem1001); - $this->tbl_names []= $elem1001; + $elem1029 = null; + $xfer += $input->readString($elem1029); + $this->tbl_names []= $elem1029; } $xfer += $input->readListEnd(); } else { @@ -23014,9 +23074,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter1002) + foreach ($this->tbl_names as $iter1030) { - $xfer += $output->writeString($iter1002); + $xfer += $output->writeString($iter1030); } } $output->writeListEnd(); @@ -23081,15 +23141,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1003 = 0; - $_etype1006 = 0; - $xfer += $input->readListBegin($_etype1006, $_size1003); - for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007) + $_size1031 = 0; + $_etype1034 = 0; + $xfer += $input->readListBegin($_etype1034, $_size1031); + for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035) { - $elem1008 = null; - $elem1008 = new \metastore\Table(); - $xfer += $elem1008->read($input); - $this->success []= $elem1008; + $elem1036 = null; + $elem1036 = new \metastore\Table(); + $xfer += $elem1036->read($input); + $this->success []= $elem1036; } $xfer += $input->readListEnd(); } else { @@ -23117,9 +23177,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1009) + foreach ($this->success as $iter1037) { - $xfer += $iter1009->write($output); + $xfer += $iter1037->write($output); } } $output->writeListEnd(); @@ -24319,14 +24379,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1010 = 0; - $_etype1013 = 0; - $xfer += $input->readListBegin($_etype1013, $_size1010); - for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014) + $_size1038 = 0; + $_etype1041 = 0; + $xfer += $input->readListBegin($_etype1041, $_size1038); + for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042) { - $elem1015 = null; - $xfer += $input->readString($elem1015); - $this->success []= $elem1015; + $elem1043 = null; + $xfer += $input->readString($elem1043); + $this->success []= $elem1043; } $xfer += $input->readListEnd(); } else { @@ -24378,9 +24438,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1016) + foreach ($this->success as $iter1044) { - $xfer += $output->writeString($iter1016); + $xfer += $output->writeString($iter1044); } } $output->writeListEnd(); @@ -25903,15 +25963,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1017 = 0; - $_etype1020 = 0; - $xfer += $input->readListBegin($_etype1020, $_size1017); - for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021) + $_size1045 = 0; + $_etype1048 = 0; + $xfer += $input->readListBegin($_etype1048, $_size1045); + for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) { 
- $elem1022 = null; - $elem1022 = new \metastore\Partition(); - $xfer += $elem1022->read($input); - $this->new_parts []= $elem1022; + $elem1050 = null; + $elem1050 = new \metastore\Partition(); + $xfer += $elem1050->read($input); + $this->new_parts []= $elem1050; } $xfer += $input->readListEnd(); } else { @@ -25939,9 +25999,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1023) + foreach ($this->new_parts as $iter1051) { - $xfer += $iter1023->write($output); + $xfer += $iter1051->write($output); } } $output->writeListEnd(); @@ -26156,15 +26216,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1024 = 0; - $_etype1027 = 0; - $xfer += $input->readListBegin($_etype1027, $_size1024); - for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028) + $_size1052 = 0; + $_etype1055 = 0; + $xfer += $input->readListBegin($_etype1055, $_size1052); + for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) { - $elem1029 = null; - $elem1029 = new \metastore\PartitionSpec(); - $xfer += $elem1029->read($input); - $this->new_parts []= $elem1029; + $elem1057 = null; + $elem1057 = new \metastore\PartitionSpec(); + $xfer += $elem1057->read($input); + $this->new_parts []= $elem1057; } $xfer += $input->readListEnd(); } else { @@ -26192,9 +26252,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1030) + foreach ($this->new_parts as $iter1058) { - $xfer += $iter1030->write($output); + $xfer += $iter1058->write($output); } } $output->writeListEnd(); @@ -26444,14 +26504,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1031 = 0; - $_etype1034 = 0; - $xfer += $input->readListBegin($_etype1034, $_size1031); - for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035) + $_size1059 = 0; + $_etype1062 = 0; + $xfer += $input->readListBegin($_etype1062, $_size1059); + for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) { - $elem1036 = null; - $xfer += $input->readString($elem1036); - $this->part_vals []= $elem1036; + $elem1064 = null; + $xfer += $input->readString($elem1064); + $this->part_vals []= $elem1064; } $xfer += $input->readListEnd(); } else { @@ -26489,9 +26549,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1037) + foreach ($this->part_vals as $iter1065) { - $xfer += $output->writeString($iter1037); + $xfer += $output->writeString($iter1065); } } $output->writeListEnd(); @@ -26993,14 +27053,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1038 = 0; - $_etype1041 = 0; - $xfer += $input->readListBegin($_etype1041, $_size1038); - for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042) + $_size1066 = 0; + $_etype1069 = 0; + $xfer += $input->readListBegin($_etype1069, $_size1066); + for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) { - $elem1043 = null; - $xfer += $input->readString($elem1043); - $this->part_vals []= $elem1043; + $elem1071 = null; + $xfer += $input->readString($elem1071); + $this->part_vals []= $elem1071; } $xfer += $input->readListEnd(); } else { @@ -27046,9 +27106,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args 
{ { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1044) + foreach ($this->part_vals as $iter1072) { - $xfer += $output->writeString($iter1044); + $xfer += $output->writeString($iter1072); } } $output->writeListEnd(); @@ -27902,14 +27962,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1045 = 0; - $_etype1048 = 0; - $xfer += $input->readListBegin($_etype1048, $_size1045); - for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) + $_size1073 = 0; + $_etype1076 = 0; + $xfer += $input->readListBegin($_etype1076, $_size1073); + for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) { - $elem1050 = null; - $xfer += $input->readString($elem1050); - $this->part_vals []= $elem1050; + $elem1078 = null; + $xfer += $input->readString($elem1078); + $this->part_vals []= $elem1078; } $xfer += $input->readListEnd(); } else { @@ -27954,9 +28014,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1051) + foreach ($this->part_vals as $iter1079) { - $xfer += $output->writeString($iter1051); + $xfer += $output->writeString($iter1079); } } $output->writeListEnd(); @@ -28209,14 +28269,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1052 = 0; - $_etype1055 = 0; - $xfer += $input->readListBegin($_etype1055, $_size1052); - for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) + $_size1080 = 0; + $_etype1083 = 0; + $xfer += $input->readListBegin($_etype1083, $_size1080); + for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) { - $elem1057 = null; - $xfer += $input->readString($elem1057); - $this->part_vals []= $elem1057; + $elem1085 = null; + $xfer += $input->readString($elem1085); + $this->part_vals []= $elem1085; } $xfer += $input->readListEnd(); } else { @@ -28269,9 +28329,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1058) + foreach ($this->part_vals as $iter1086) { - $xfer += $output->writeString($iter1058); + $xfer += $output->writeString($iter1086); } } $output->writeListEnd(); @@ -29285,14 +29345,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1059 = 0; - $_etype1062 = 0; - $xfer += $input->readListBegin($_etype1062, $_size1059); - for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) + $_size1087 = 0; + $_etype1090 = 0; + $xfer += $input->readListBegin($_etype1090, $_size1087); + for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) { - $elem1064 = null; - $xfer += $input->readString($elem1064); - $this->part_vals []= $elem1064; + $elem1092 = null; + $xfer += $input->readString($elem1092); + $this->part_vals []= $elem1092; } $xfer += $input->readListEnd(); } else { @@ -29330,9 +29390,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1065) + foreach ($this->part_vals as $iter1093) { - $xfer += $output->writeString($iter1065); + $xfer += $output->writeString($iter1093); } } $output->writeListEnd(); @@ -29574,17 +29634,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1066 = 0; - $_ktype1067 = 
0; - $_vtype1068 = 0; - $xfer += $input->readMapBegin($_ktype1067, $_vtype1068, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1094 = 0; + $_ktype1095 = 0; + $_vtype1096 = 0; + $xfer += $input->readMapBegin($_ktype1095, $_vtype1096, $_size1094); + for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) { - $key1071 = ''; - $val1072 = ''; - $xfer += $input->readString($key1071); - $xfer += $input->readString($val1072); - $this->partitionSpecs[$key1071] = $val1072; + $key1099 = ''; + $val1100 = ''; + $xfer += $input->readString($key1099); + $xfer += $input->readString($val1100); + $this->partitionSpecs[$key1099] = $val1100; } $xfer += $input->readMapEnd(); } else { @@ -29640,10 +29700,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1073 => $viter1074) + foreach ($this->partitionSpecs as $kiter1101 => $viter1102) { - $xfer += $output->writeString($kiter1073); - $xfer += $output->writeString($viter1074); + $xfer += $output->writeString($kiter1101); + $xfer += $output->writeString($viter1102); } } $output->writeMapEnd(); @@ -29955,17 +30015,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1075 = 0; - $_ktype1076 = 0; - $_vtype1077 = 0; - $xfer += $input->readMapBegin($_ktype1076, $_vtype1077, $_size1075); - for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) + $_size1103 = 0; + $_ktype1104 = 0; + $_vtype1105 = 0; + $xfer += $input->readMapBegin($_ktype1104, $_vtype1105, $_size1103); + for ($_i1107 = 0; $_i1107 < $_size1103; ++$_i1107) { - $key1080 = ''; - $val1081 = ''; - $xfer += $input->readString($key1080); - $xfer += $input->readString($val1081); - $this->partitionSpecs[$key1080] = $val1081; + $key1108 = ''; + $val1109 = ''; + $xfer += $input->readString($key1108); + $xfer += $input->readString($val1109); + $this->partitionSpecs[$key1108] = $val1109; } $xfer += $input->readMapEnd(); } else { @@ -30021,10 +30081,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1082 => $viter1083) + foreach ($this->partitionSpecs as $kiter1110 => $viter1111) { - $xfer += $output->writeString($kiter1082); - $xfer += $output->writeString($viter1083); + $xfer += $output->writeString($kiter1110); + $xfer += $output->writeString($viter1111); } } $output->writeMapEnd(); @@ -30157,15 +30217,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1084 = 0; - $_etype1087 = 0; - $xfer += $input->readListBegin($_etype1087, $_size1084); - for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088) + $_size1112 = 0; + $_etype1115 = 0; + $xfer += $input->readListBegin($_etype1115, $_size1112); + for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116) { - $elem1089 = null; - $elem1089 = new \metastore\Partition(); - $xfer += $elem1089->read($input); - $this->success []= $elem1089; + $elem1117 = null; + $elem1117 = new \metastore\Partition(); + $xfer += $elem1117->read($input); + $this->success []= $elem1117; } $xfer += $input->readListEnd(); } else { @@ -30225,9 +30285,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1090) + foreach ($this->success as $iter1118) { - $xfer 
+= $iter1090->write($output); + $xfer += $iter1118->write($output); } } $output->writeListEnd(); @@ -30373,14 +30433,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1091 = 0; - $_etype1094 = 0; - $xfer += $input->readListBegin($_etype1094, $_size1091); - for ($_i1095 = 0; $_i1095 < $_size1091; ++$_i1095) + $_size1119 = 0; + $_etype1122 = 0; + $xfer += $input->readListBegin($_etype1122, $_size1119); + for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123) { - $elem1096 = null; - $xfer += $input->readString($elem1096); - $this->part_vals []= $elem1096; + $elem1124 = null; + $xfer += $input->readString($elem1124); + $this->part_vals []= $elem1124; } $xfer += $input->readListEnd(); } else { @@ -30397,14 +30457,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1097 = 0; - $_etype1100 = 0; - $xfer += $input->readListBegin($_etype1100, $_size1097); - for ($_i1101 = 0; $_i1101 < $_size1097; ++$_i1101) + $_size1125 = 0; + $_etype1128 = 0; + $xfer += $input->readListBegin($_etype1128, $_size1125); + for ($_i1129 = 0; $_i1129 < $_size1125; ++$_i1129) { - $elem1102 = null; - $xfer += $input->readString($elem1102); - $this->group_names []= $elem1102; + $elem1130 = null; + $xfer += $input->readString($elem1130); + $this->group_names []= $elem1130; } $xfer += $input->readListEnd(); } else { @@ -30442,9 +30502,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1103) + foreach ($this->part_vals as $iter1131) { - $xfer += $output->writeString($iter1103); + $xfer += $output->writeString($iter1131); } } $output->writeListEnd(); @@ -30464,9 +30524,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1104) + foreach ($this->group_names as $iter1132) { - $xfer += $output->writeString($iter1104); + $xfer += $output->writeString($iter1132); } } $output->writeListEnd(); @@ -31057,15 +31117,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1105 = 0; - $_etype1108 = 0; - $xfer += $input->readListBegin($_etype1108, $_size1105); - for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109) + $_size1133 = 0; + $_etype1136 = 0; + $xfer += $input->readListBegin($_etype1136, $_size1133); + for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137) { - $elem1110 = null; - $elem1110 = new \metastore\Partition(); - $xfer += $elem1110->read($input); - $this->success []= $elem1110; + $elem1138 = null; + $elem1138 = new \metastore\Partition(); + $xfer += $elem1138->read($input); + $this->success []= $elem1138; } $xfer += $input->readListEnd(); } else { @@ -31109,9 +31169,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1111) + foreach ($this->success as $iter1139) { - $xfer += $iter1111->write($output); + $xfer += $iter1139->write($output); } } $output->writeListEnd(); @@ -31257,14 +31317,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1112 = 0; - $_etype1115 = 0; - $xfer += $input->readListBegin($_etype1115, $_size1112); - for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116) + 
$_size1140 = 0; + $_etype1143 = 0; + $xfer += $input->readListBegin($_etype1143, $_size1140); + for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144) { - $elem1117 = null; - $xfer += $input->readString($elem1117); - $this->group_names []= $elem1117; + $elem1145 = null; + $xfer += $input->readString($elem1145); + $this->group_names []= $elem1145; } $xfer += $input->readListEnd(); } else { @@ -31312,9 +31372,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1118) + foreach ($this->group_names as $iter1146) { - $xfer += $output->writeString($iter1118); + $xfer += $output->writeString($iter1146); } } $output->writeListEnd(); @@ -31403,15 +31463,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1119 = 0; - $_etype1122 = 0; - $xfer += $input->readListBegin($_etype1122, $_size1119); - for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123) + $_size1147 = 0; + $_etype1150 = 0; + $xfer += $input->readListBegin($_etype1150, $_size1147); + for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151) { - $elem1124 = null; - $elem1124 = new \metastore\Partition(); - $xfer += $elem1124->read($input); - $this->success []= $elem1124; + $elem1152 = null; + $elem1152 = new \metastore\Partition(); + $xfer += $elem1152->read($input); + $this->success []= $elem1152; } $xfer += $input->readListEnd(); } else { @@ -31455,9 +31515,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1125) + foreach ($this->success as $iter1153) { - $xfer += $iter1125->write($output); + $xfer += $iter1153->write($output); } } $output->writeListEnd(); @@ -31677,15 +31737,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1126 = 0; - $_etype1129 = 0; - $xfer += $input->readListBegin($_etype1129, $_size1126); - for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130) + $_size1154 = 0; + $_etype1157 = 0; + $xfer += $input->readListBegin($_etype1157, $_size1154); + for ($_i1158 = 0; $_i1158 < $_size1154; ++$_i1158) { - $elem1131 = null; - $elem1131 = new \metastore\PartitionSpec(); - $xfer += $elem1131->read($input); - $this->success []= $elem1131; + $elem1159 = null; + $elem1159 = new \metastore\PartitionSpec(); + $xfer += $elem1159->read($input); + $this->success []= $elem1159; } $xfer += $input->readListEnd(); } else { @@ -31729,9 +31789,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1132) + foreach ($this->success as $iter1160) { - $xfer += $iter1132->write($output); + $xfer += $iter1160->write($output); } } $output->writeListEnd(); @@ -31950,14 +32010,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1133 = 0; - $_etype1136 = 0; - $xfer += $input->readListBegin($_etype1136, $_size1133); - for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137) + $_size1161 = 0; + $_etype1164 = 0; + $xfer += $input->readListBegin($_etype1164, $_size1161); + for ($_i1165 = 0; $_i1165 < $_size1161; ++$_i1165) { - $elem1138 = null; - $xfer += $input->readString($elem1138); - $this->success []= $elem1138; + $elem1166 = null; + $xfer += $input->readString($elem1166); + $this->success []= 
$elem1166; } $xfer += $input->readListEnd(); } else { @@ -32001,9 +32061,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1139) + foreach ($this->success as $iter1167) { - $xfer += $output->writeString($iter1139); + $xfer += $output->writeString($iter1167); } } $output->writeListEnd(); @@ -32334,14 +32394,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1140 = 0; - $_etype1143 = 0; - $xfer += $input->readListBegin($_etype1143, $_size1140); - for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144) + $_size1168 = 0; + $_etype1171 = 0; + $xfer += $input->readListBegin($_etype1171, $_size1168); + for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172) { - $elem1145 = null; - $xfer += $input->readString($elem1145); - $this->part_vals []= $elem1145; + $elem1173 = null; + $xfer += $input->readString($elem1173); + $this->part_vals []= $elem1173; } $xfer += $input->readListEnd(); } else { @@ -32386,9 +32446,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1146) + foreach ($this->part_vals as $iter1174) { - $xfer += $output->writeString($iter1146); + $xfer += $output->writeString($iter1174); } } $output->writeListEnd(); @@ -32482,15 +32542,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1147 = 0; - $_etype1150 = 0; - $xfer += $input->readListBegin($_etype1150, $_size1147); - for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151) + $_size1175 = 0; + $_etype1178 = 0; + $xfer += $input->readListBegin($_etype1178, $_size1175); + for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179) { - $elem1152 = null; - $elem1152 = new \metastore\Partition(); - $xfer += $elem1152->read($input); - $this->success []= $elem1152; + $elem1180 = null; + $elem1180 = new \metastore\Partition(); + $xfer += $elem1180->read($input); + $this->success []= $elem1180; } $xfer += $input->readListEnd(); } else { @@ -32534,9 +32594,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1153) + foreach ($this->success as $iter1181) { - $xfer += $iter1153->write($output); + $xfer += $iter1181->write($output); } } $output->writeListEnd(); @@ -32683,14 +32743,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1154 = 0; - $_etype1157 = 0; - $xfer += $input->readListBegin($_etype1157, $_size1154); - for ($_i1158 = 0; $_i1158 < $_size1154; ++$_i1158) + $_size1182 = 0; + $_etype1185 = 0; + $xfer += $input->readListBegin($_etype1185, $_size1182); + for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186) { - $elem1159 = null; - $xfer += $input->readString($elem1159); - $this->part_vals []= $elem1159; + $elem1187 = null; + $xfer += $input->readString($elem1187); + $this->part_vals []= $elem1187; } $xfer += $input->readListEnd(); } else { @@ -32714,14 +32774,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1160 = 0; - $_etype1163 = 0; - $xfer += $input->readListBegin($_etype1163, $_size1160); - for ($_i1164 = 0; $_i1164 < $_size1160; ++$_i1164) + $_size1188 = 0; + $_etype1191 = 0; + $xfer += 
$input->readListBegin($_etype1191, $_size1188); + for ($_i1192 = 0; $_i1192 < $_size1188; ++$_i1192) { - $elem1165 = null; - $xfer += $input->readString($elem1165); - $this->group_names []= $elem1165; + $elem1193 = null; + $xfer += $input->readString($elem1193); + $this->group_names []= $elem1193; } $xfer += $input->readListEnd(); } else { @@ -32759,9 +32819,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1166) + foreach ($this->part_vals as $iter1194) { - $xfer += $output->writeString($iter1166); + $xfer += $output->writeString($iter1194); } } $output->writeListEnd(); @@ -32786,9 +32846,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1167) + foreach ($this->group_names as $iter1195) { - $xfer += $output->writeString($iter1167); + $xfer += $output->writeString($iter1195); } } $output->writeListEnd(); @@ -32877,15 +32937,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1168 = 0; - $_etype1171 = 0; - $xfer += $input->readListBegin($_etype1171, $_size1168); - for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172) + $_size1196 = 0; + $_etype1199 = 0; + $xfer += $input->readListBegin($_etype1199, $_size1196); + for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200) { - $elem1173 = null; - $elem1173 = new \metastore\Partition(); - $xfer += $elem1173->read($input); - $this->success []= $elem1173; + $elem1201 = null; + $elem1201 = new \metastore\Partition(); + $xfer += $elem1201->read($input); + $this->success []= $elem1201; } $xfer += $input->readListEnd(); } else { @@ -32929,9 +32989,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1174) + foreach ($this->success as $iter1202) { - $xfer += $iter1174->write($output); + $xfer += $iter1202->write($output); } } $output->writeListEnd(); @@ -33052,14 +33112,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1175 = 0; - $_etype1178 = 0; - $xfer += $input->readListBegin($_etype1178, $_size1175); - for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179) + $_size1203 = 0; + $_etype1206 = 0; + $xfer += $input->readListBegin($_etype1206, $_size1203); + for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) { - $elem1180 = null; - $xfer += $input->readString($elem1180); - $this->part_vals []= $elem1180; + $elem1208 = null; + $xfer += $input->readString($elem1208); + $this->part_vals []= $elem1208; } $xfer += $input->readListEnd(); } else { @@ -33104,9 +33164,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1181) + foreach ($this->part_vals as $iter1209) { - $xfer += $output->writeString($iter1181); + $xfer += $output->writeString($iter1209); } } $output->writeListEnd(); @@ -33199,14 +33259,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1182 = 0; - $_etype1185 = 0; - $xfer += $input->readListBegin($_etype1185, $_size1182); - for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186) + $_size1210 = 0; + $_etype1213 = 0; + $xfer += 
$input->readListBegin($_etype1213, $_size1210); + for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214) { - $elem1187 = null; - $xfer += $input->readString($elem1187); - $this->success []= $elem1187; + $elem1215 = null; + $xfer += $input->readString($elem1215); + $this->success []= $elem1215; } $xfer += $input->readListEnd(); } else { @@ -33250,9 +33310,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1188) + foreach ($this->success as $iter1216) { - $xfer += $output->writeString($iter1188); + $xfer += $output->writeString($iter1216); } } $output->writeListEnd(); @@ -33495,15 +33555,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1189 = 0; - $_etype1192 = 0; - $xfer += $input->readListBegin($_etype1192, $_size1189); - for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193) + $_size1217 = 0; + $_etype1220 = 0; + $xfer += $input->readListBegin($_etype1220, $_size1217); + for ($_i1221 = 0; $_i1221 < $_size1217; ++$_i1221) { - $elem1194 = null; - $elem1194 = new \metastore\Partition(); - $xfer += $elem1194->read($input); - $this->success []= $elem1194; + $elem1222 = null; + $elem1222 = new \metastore\Partition(); + $xfer += $elem1222->read($input); + $this->success []= $elem1222; } $xfer += $input->readListEnd(); } else { @@ -33547,9 +33607,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1195) + foreach ($this->success as $iter1223) { - $xfer += $iter1195->write($output); + $xfer += $iter1223->write($output); } } $output->writeListEnd(); @@ -33792,15 +33852,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1196 = 0; - $_etype1199 = 0; - $xfer += $input->readListBegin($_etype1199, $_size1196); - for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200) + $_size1224 = 0; + $_etype1227 = 0; + $xfer += $input->readListBegin($_etype1227, $_size1224); + for ($_i1228 = 0; $_i1228 < $_size1224; ++$_i1228) { - $elem1201 = null; - $elem1201 = new \metastore\PartitionSpec(); - $xfer += $elem1201->read($input); - $this->success []= $elem1201; + $elem1229 = null; + $elem1229 = new \metastore\PartitionSpec(); + $xfer += $elem1229->read($input); + $this->success []= $elem1229; } $xfer += $input->readListEnd(); } else { @@ -33844,9 +33904,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1202) + foreach ($this->success as $iter1230) { - $xfer += $iter1202->write($output); + $xfer += $iter1230->write($output); } } $output->writeListEnd(); @@ -34412,14 +34472,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1203 = 0; - $_etype1206 = 0; - $xfer += $input->readListBegin($_etype1206, $_size1203); - for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) + $_size1231 = 0; + $_etype1234 = 0; + $xfer += $input->readListBegin($_etype1234, $_size1231); + for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) { - $elem1208 = null; - $xfer += $input->readString($elem1208); - $this->names []= $elem1208; + $elem1236 = null; + $xfer += $input->readString($elem1236); + $this->names []= $elem1236; } $xfer += $input->readListEnd(); } else { @@ -34457,9 
+34517,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1209) + foreach ($this->names as $iter1237) { - $xfer += $output->writeString($iter1209); + $xfer += $output->writeString($iter1237); } } $output->writeListEnd(); @@ -34548,15 +34608,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1210 = 0; - $_etype1213 = 0; - $xfer += $input->readListBegin($_etype1213, $_size1210); - for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214) + $_size1238 = 0; + $_etype1241 = 0; + $xfer += $input->readListBegin($_etype1241, $_size1238); + for ($_i1242 = 0; $_i1242 < $_size1238; ++$_i1242) { - $elem1215 = null; - $elem1215 = new \metastore\Partition(); - $xfer += $elem1215->read($input); - $this->success []= $elem1215; + $elem1243 = null; + $elem1243 = new \metastore\Partition(); + $xfer += $elem1243->read($input); + $this->success []= $elem1243; } $xfer += $input->readListEnd(); } else { @@ -34600,9 +34660,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1216) + foreach ($this->success as $iter1244) { - $xfer += $iter1216->write($output); + $xfer += $iter1244->write($output); } } $output->writeListEnd(); @@ -34941,15 +35001,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1217 = 0; - $_etype1220 = 0; - $xfer += $input->readListBegin($_etype1220, $_size1217); - for ($_i1221 = 0; $_i1221 < $_size1217; ++$_i1221) + $_size1245 = 0; + $_etype1248 = 0; + $xfer += $input->readListBegin($_etype1248, $_size1245); + for ($_i1249 = 0; $_i1249 < $_size1245; ++$_i1249) { - $elem1222 = null; - $elem1222 = new \metastore\Partition(); - $xfer += $elem1222->read($input); - $this->new_parts []= $elem1222; + $elem1250 = null; + $elem1250 = new \metastore\Partition(); + $xfer += $elem1250->read($input); + $this->new_parts []= $elem1250; } $xfer += $input->readListEnd(); } else { @@ -34987,9 +35047,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1223) + foreach ($this->new_parts as $iter1251) { - $xfer += $iter1223->write($output); + $xfer += $iter1251->write($output); } } $output->writeListEnd(); @@ -35204,15 +35264,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1224 = 0; - $_etype1227 = 0; - $xfer += $input->readListBegin($_etype1227, $_size1224); - for ($_i1228 = 0; $_i1228 < $_size1224; ++$_i1228) + $_size1252 = 0; + $_etype1255 = 0; + $xfer += $input->readListBegin($_etype1255, $_size1252); + for ($_i1256 = 0; $_i1256 < $_size1252; ++$_i1256) { - $elem1229 = null; - $elem1229 = new \metastore\Partition(); - $xfer += $elem1229->read($input); - $this->new_parts []= $elem1229; + $elem1257 = null; + $elem1257 = new \metastore\Partition(); + $xfer += $elem1257->read($input); + $this->new_parts []= $elem1257; } $xfer += $input->readListEnd(); } else { @@ -35258,9 +35318,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1230) + foreach ($this->new_parts as $iter1258) { - $xfer += 
$iter1230->write($output); + $xfer += $iter1258->write($output); } } $output->writeListEnd(); @@ -35948,14 +36008,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1231 = 0; - $_etype1234 = 0; - $xfer += $input->readListBegin($_etype1234, $_size1231); - for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) + $_size1259 = 0; + $_etype1262 = 0; + $xfer += $input->readListBegin($_etype1262, $_size1259); + for ($_i1263 = 0; $_i1263 < $_size1259; ++$_i1263) { - $elem1236 = null; - $xfer += $input->readString($elem1236); - $this->part_vals []= $elem1236; + $elem1264 = null; + $xfer += $input->readString($elem1264); + $this->part_vals []= $elem1264; } $xfer += $input->readListEnd(); } else { @@ -36001,9 +36061,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1237) + foreach ($this->part_vals as $iter1265) { - $xfer += $output->writeString($iter1237); + $xfer += $output->writeString($iter1265); } } $output->writeListEnd(); @@ -36398,14 +36458,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1238 = 0; - $_etype1241 = 0; - $xfer += $input->readListBegin($_etype1241, $_size1238); - for ($_i1242 = 0; $_i1242 < $_size1238; ++$_i1242) + $_size1266 = 0; + $_etype1269 = 0; + $xfer += $input->readListBegin($_etype1269, $_size1266); + for ($_i1270 = 0; $_i1270 < $_size1266; ++$_i1270) { - $elem1243 = null; - $xfer += $input->readString($elem1243); - $this->part_vals []= $elem1243; + $elem1271 = null; + $xfer += $input->readString($elem1271); + $this->part_vals []= $elem1271; } $xfer += $input->readListEnd(); } else { @@ -36440,9 +36500,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1244) + foreach ($this->part_vals as $iter1272) { - $xfer += $output->writeString($iter1244); + $xfer += $output->writeString($iter1272); } } $output->writeListEnd(); @@ -36896,14 +36956,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1245 = 0; - $_etype1248 = 0; - $xfer += $input->readListBegin($_etype1248, $_size1245); - for ($_i1249 = 0; $_i1249 < $_size1245; ++$_i1249) + $_size1273 = 0; + $_etype1276 = 0; + $xfer += $input->readListBegin($_etype1276, $_size1273); + for ($_i1277 = 0; $_i1277 < $_size1273; ++$_i1277) { - $elem1250 = null; - $xfer += $input->readString($elem1250); - $this->success []= $elem1250; + $elem1278 = null; + $xfer += $input->readString($elem1278); + $this->success []= $elem1278; } $xfer += $input->readListEnd(); } else { @@ -36939,9 +36999,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1251) + foreach ($this->success as $iter1279) { - $xfer += $output->writeString($iter1251); + $xfer += $output->writeString($iter1279); } } $output->writeListEnd(); @@ -37101,17 +37161,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1252 = 0; - $_ktype1253 = 0; - $_vtype1254 = 0; - $xfer += $input->readMapBegin($_ktype1253, $_vtype1254, $_size1252); - for ($_i1256 = 0; $_i1256 < $_size1252; ++$_i1256) + $_size1280 = 0; + 
$_ktype1281 = 0; + $_vtype1282 = 0; + $xfer += $input->readMapBegin($_ktype1281, $_vtype1282, $_size1280); + for ($_i1284 = 0; $_i1284 < $_size1280; ++$_i1284) { - $key1257 = ''; - $val1258 = ''; - $xfer += $input->readString($key1257); - $xfer += $input->readString($val1258); - $this->success[$key1257] = $val1258; + $key1285 = ''; + $val1286 = ''; + $xfer += $input->readString($key1285); + $xfer += $input->readString($val1286); + $this->success[$key1285] = $val1286; } $xfer += $input->readMapEnd(); } else { @@ -37147,10 +37207,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1259 => $viter1260) + foreach ($this->success as $kiter1287 => $viter1288) { - $xfer += $output->writeString($kiter1259); - $xfer += $output->writeString($viter1260); + $xfer += $output->writeString($kiter1287); + $xfer += $output->writeString($viter1288); } } $output->writeMapEnd(); @@ -37270,17 +37330,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1261 = 0; - $_ktype1262 = 0; - $_vtype1263 = 0; - $xfer += $input->readMapBegin($_ktype1262, $_vtype1263, $_size1261); - for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) + $_size1289 = 0; + $_ktype1290 = 0; + $_vtype1291 = 0; + $xfer += $input->readMapBegin($_ktype1290, $_vtype1291, $_size1289); + for ($_i1293 = 0; $_i1293 < $_size1289; ++$_i1293) { - $key1266 = ''; - $val1267 = ''; - $xfer += $input->readString($key1266); - $xfer += $input->readString($val1267); - $this->part_vals[$key1266] = $val1267; + $key1294 = ''; + $val1295 = ''; + $xfer += $input->readString($key1294); + $xfer += $input->readString($val1295); + $this->part_vals[$key1294] = $val1295; } $xfer += $input->readMapEnd(); } else { @@ -37325,10 +37385,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1268 => $viter1269) + foreach ($this->part_vals as $kiter1296 => $viter1297) { - $xfer += $output->writeString($kiter1268); - $xfer += $output->writeString($viter1269); + $xfer += $output->writeString($kiter1296); + $xfer += $output->writeString($viter1297); } } $output->writeMapEnd(); @@ -37650,17 +37710,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1270 = 0; - $_ktype1271 = 0; - $_vtype1272 = 0; - $xfer += $input->readMapBegin($_ktype1271, $_vtype1272, $_size1270); - for ($_i1274 = 0; $_i1274 < $_size1270; ++$_i1274) + $_size1298 = 0; + $_ktype1299 = 0; + $_vtype1300 = 0; + $xfer += $input->readMapBegin($_ktype1299, $_vtype1300, $_size1298); + for ($_i1302 = 0; $_i1302 < $_size1298; ++$_i1302) { - $key1275 = ''; - $val1276 = ''; - $xfer += $input->readString($key1275); - $xfer += $input->readString($val1276); - $this->part_vals[$key1275] = $val1276; + $key1303 = ''; + $val1304 = ''; + $xfer += $input->readString($key1303); + $xfer += $input->readString($val1304); + $this->part_vals[$key1303] = $val1304; } $xfer += $input->readMapEnd(); } else { @@ -37705,10 +37765,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1277 => $viter1278) + foreach ($this->part_vals as $kiter1305 => $viter1306) { - $xfer += $output->writeString($kiter1277); - $xfer 
+= $output->writeString($viter1278); + $xfer += $output->writeString($kiter1305); + $xfer += $output->writeString($viter1306); } } $output->writeMapEnd(); @@ -43187,14 +43247,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1279 = 0; - $_etype1282 = 0; - $xfer += $input->readListBegin($_etype1282, $_size1279); - for ($_i1283 = 0; $_i1283 < $_size1279; ++$_i1283) + $_size1307 = 0; + $_etype1310 = 0; + $xfer += $input->readListBegin($_etype1310, $_size1307); + for ($_i1311 = 0; $_i1311 < $_size1307; ++$_i1311) { - $elem1284 = null; - $xfer += $input->readString($elem1284); - $this->success []= $elem1284; + $elem1312 = null; + $xfer += $input->readString($elem1312); + $this->success []= $elem1312; } $xfer += $input->readListEnd(); } else { @@ -43230,9 +43290,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1285) + foreach ($this->success as $iter1313) { - $xfer += $output->writeString($iter1285); + $xfer += $output->writeString($iter1313); } } $output->writeListEnd(); @@ -44101,14 +44161,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1286 = 0; - $_etype1289 = 0; - $xfer += $input->readListBegin($_etype1289, $_size1286); - for ($_i1290 = 0; $_i1290 < $_size1286; ++$_i1290) + $_size1314 = 0; + $_etype1317 = 0; + $xfer += $input->readListBegin($_etype1317, $_size1314); + for ($_i1318 = 0; $_i1318 < $_size1314; ++$_i1318) { - $elem1291 = null; - $xfer += $input->readString($elem1291); - $this->success []= $elem1291; + $elem1319 = null; + $xfer += $input->readString($elem1319); + $this->success []= $elem1319; } $xfer += $input->readListEnd(); } else { @@ -44144,9 +44204,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1292) + foreach ($this->success as $iter1320) { - $xfer += $output->writeString($iter1292); + $xfer += $output->writeString($iter1320); } } $output->writeListEnd(); @@ -44837,15 +44897,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1293 = 0; - $_etype1296 = 0; - $xfer += $input->readListBegin($_etype1296, $_size1293); - for ($_i1297 = 0; $_i1297 < $_size1293; ++$_i1297) + $_size1321 = 0; + $_etype1324 = 0; + $xfer += $input->readListBegin($_etype1324, $_size1321); + for ($_i1325 = 0; $_i1325 < $_size1321; ++$_i1325) { - $elem1298 = null; - $elem1298 = new \metastore\Role(); - $xfer += $elem1298->read($input); - $this->success []= $elem1298; + $elem1326 = null; + $elem1326 = new \metastore\Role(); + $xfer += $elem1326->read($input); + $this->success []= $elem1326; } $xfer += $input->readListEnd(); } else { @@ -44881,9 +44941,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1299) + foreach ($this->success as $iter1327) { - $xfer += $iter1299->write($output); + $xfer += $iter1327->write($output); } } $output->writeListEnd(); @@ -45545,14 +45605,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1300 = 0; - $_etype1303 = 0; - $xfer += $input->readListBegin($_etype1303, $_size1300); - for ($_i1304 = 0; $_i1304 < $_size1300; ++$_i1304) + $_size1328 = 0; + $_etype1331 = 0; + 
$xfer += $input->readListBegin($_etype1331, $_size1328); + for ($_i1332 = 0; $_i1332 < $_size1328; ++$_i1332) { - $elem1305 = null; - $xfer += $input->readString($elem1305); - $this->group_names []= $elem1305; + $elem1333 = null; + $xfer += $input->readString($elem1333); + $this->group_names []= $elem1333; } $xfer += $input->readListEnd(); } else { @@ -45593,9 +45653,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1306) + foreach ($this->group_names as $iter1334) { - $xfer += $output->writeString($iter1306); + $xfer += $output->writeString($iter1334); } } $output->writeListEnd(); @@ -45903,15 +45963,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1307 = 0; - $_etype1310 = 0; - $xfer += $input->readListBegin($_etype1310, $_size1307); - for ($_i1311 = 0; $_i1311 < $_size1307; ++$_i1311) + $_size1335 = 0; + $_etype1338 = 0; + $xfer += $input->readListBegin($_etype1338, $_size1335); + for ($_i1339 = 0; $_i1339 < $_size1335; ++$_i1339) { - $elem1312 = null; - $elem1312 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1312->read($input); - $this->success []= $elem1312; + $elem1340 = null; + $elem1340 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1340->read($input); + $this->success []= $elem1340; } $xfer += $input->readListEnd(); } else { @@ -45947,9 +46007,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1313) + foreach ($this->success as $iter1341) { - $xfer += $iter1313->write($output); + $xfer += $iter1341->write($output); } } $output->writeListEnd(); @@ -46817,14 +46877,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1314 = 0; - $_etype1317 = 0; - $xfer += $input->readListBegin($_etype1317, $_size1314); - for ($_i1318 = 0; $_i1318 < $_size1314; ++$_i1318) + $_size1342 = 0; + $_etype1345 = 0; + $xfer += $input->readListBegin($_etype1345, $_size1342); + for ($_i1346 = 0; $_i1346 < $_size1342; ++$_i1346) { - $elem1319 = null; - $xfer += $input->readString($elem1319); - $this->group_names []= $elem1319; + $elem1347 = null; + $xfer += $input->readString($elem1347); + $this->group_names []= $elem1347; } $xfer += $input->readListEnd(); } else { @@ -46857,9 +46917,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1320) + foreach ($this->group_names as $iter1348) { - $xfer += $output->writeString($iter1320); + $xfer += $output->writeString($iter1348); } } $output->writeListEnd(); @@ -46935,14 +46995,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1321 = 0; - $_etype1324 = 0; - $xfer += $input->readListBegin($_etype1324, $_size1321); - for ($_i1325 = 0; $_i1325 < $_size1321; ++$_i1325) + $_size1349 = 0; + $_etype1352 = 0; + $xfer += $input->readListBegin($_etype1352, $_size1349); + for ($_i1353 = 0; $_i1353 < $_size1349; ++$_i1353) { - $elem1326 = null; - $xfer += $input->readString($elem1326); - $this->success []= $elem1326; + $elem1354 = null; + $xfer += $input->readString($elem1354); + $this->success []= $elem1354; } $xfer += $input->readListEnd(); } else { @@ -46978,9 +47038,9 @@ class ThriftHiveMetastore_set_ugi_result { { 
$output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1327) + foreach ($this->success as $iter1355) { - $xfer += $output->writeString($iter1327); + $xfer += $output->writeString($iter1355); } } $output->writeListEnd(); @@ -48097,14 +48157,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1328 = 0; - $_etype1331 = 0; - $xfer += $input->readListBegin($_etype1331, $_size1328); - for ($_i1332 = 0; $_i1332 < $_size1328; ++$_i1332) + $_size1356 = 0; + $_etype1359 = 0; + $xfer += $input->readListBegin($_etype1359, $_size1356); + for ($_i1360 = 0; $_i1360 < $_size1356; ++$_i1360) { - $elem1333 = null; - $xfer += $input->readString($elem1333); - $this->success []= $elem1333; + $elem1361 = null; + $xfer += $input->readString($elem1361); + $this->success []= $elem1361; } $xfer += $input->readListEnd(); } else { @@ -48132,9 +48192,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1334) + foreach ($this->success as $iter1362) { - $xfer += $output->writeString($iter1334); + $xfer += $output->writeString($iter1362); } } $output->writeListEnd(); @@ -48773,14 +48833,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1335 = 0; - $_etype1338 = 0; - $xfer += $input->readListBegin($_etype1338, $_size1335); - for ($_i1339 = 0; $_i1339 < $_size1335; ++$_i1339) + $_size1363 = 0; + $_etype1366 = 0; + $xfer += $input->readListBegin($_etype1366, $_size1363); + for ($_i1367 = 0; $_i1367 < $_size1363; ++$_i1367) { - $elem1340 = null; - $xfer += $input->readString($elem1340); - $this->success []= $elem1340; + $elem1368 = null; + $xfer += $input->readString($elem1368); + $this->success []= $elem1368; } $xfer += $input->readListEnd(); } else { @@ -48808,9 +48868,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1341) + foreach ($this->success as $iter1369) { - $xfer += $output->writeString($iter1341); + $xfer += $output->writeString($iter1369); } } $output->writeListEnd(); @@ -59639,15 +59699,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1342 = 0; - $_etype1345 = 0; - $xfer += $input->readListBegin($_etype1345, $_size1342); - for ($_i1346 = 0; $_i1346 < $_size1342; ++$_i1346) + $_size1370 = 0; + $_etype1373 = 0; + $xfer += $input->readListBegin($_etype1373, $_size1370); + for ($_i1374 = 0; $_i1374 < $_size1370; ++$_i1374) { - $elem1347 = null; - $elem1347 = new \metastore\SchemaVersion(); - $xfer += $elem1347->read($input); - $this->success []= $elem1347; + $elem1375 = null; + $elem1375 = new \metastore\SchemaVersion(); + $xfer += $elem1375->read($input); + $this->success []= $elem1375; } $xfer += $input->readListEnd(); } else { @@ -59691,9 +59751,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1348) + foreach ($this->success as $iter1376) { - $xfer += $iter1348->write($output); + $xfer += $iter1376->write($output); } } $output->writeListEnd(); @@ -61562,15 +61622,15 @@ class ThriftHiveMetastore_get_runtime_stats_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1349 
= 0; - $_etype1352 = 0; - $xfer += $input->readListBegin($_etype1352, $_size1349); - for ($_i1353 = 0; $_i1353 < $_size1349; ++$_i1353) + $_size1377 = 0; + $_etype1380 = 0; + $xfer += $input->readListBegin($_etype1380, $_size1377); + for ($_i1381 = 0; $_i1381 < $_size1377; ++$_i1381) { - $elem1354 = null; - $elem1354 = new \metastore\RuntimeStat(); - $xfer += $elem1354->read($input); - $this->success []= $elem1354; + $elem1382 = null; + $elem1382 = new \metastore\RuntimeStat(); + $xfer += $elem1382->read($input); + $this->success []= $elem1382; } $xfer += $input->readListEnd(); } else { @@ -61606,9 +61666,9 @@ class ThriftHiveMetastore_get_runtime_stats_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1355) + foreach ($this->success as $iter1383) { - $xfer += $iter1355->write($output); + $xfer += $iter1383->write($output); } } $output->writeListEnd(); @@ -61627,4 +61687,189 @@ class ThriftHiveMetastore_get_runtime_stats_result { } +class ThriftHiveMetastore_get_partitions_with_specs_args { + static $_TSPEC; + + /** + * @var \metastore\GetPartitionsRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetPartitionsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_partitions_with_specs_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\GetPartitionsRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_with_specs_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_partitions_with_specs_result { + static $_TSPEC; + + /** + * @var \metastore\GetPartitionsResponse + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetPartitionsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 
'ThriftHiveMetastore_get_partitions_with_specs_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetPartitionsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_partitions_with_specs_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php index 5ed4f71b1dd947c7d4cbb5c290b393b8cd2ea31d..8cefca23c70ac61efeba6fc8007d7604883d7e4c 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php @@ -203,6 +203,17 @@ final class SchemaVersionState { ); } +final class PartitionFilterMode { + const BY_NAMES = 0; + const BY_VALUES = 1; + const BY_EXPR = 2; + static public $__names = array( + 0 => 'BY_NAMES', + 1 => 'BY_VALUES', + 2 => 'BY_EXPR', + ); +} + final class FunctionType { const JAVA = 1; static public $__names = array( @@ -7649,6 +7660,629 @@ class Partition { } +class GetPartitionsProjectSpec { + static $_TSPEC; + + /** + * @var string[] + */ + public $fieldList = null; + /** + * @var string + */ + public $paramKeyPattern = null; + /** + * @var bool + */ + public $excludeParamKeyPattern = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'fieldList', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 2 => array( + 'var' => 'paramKeyPattern', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'excludeParamKeyPattern', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['fieldList'])) { + $this->fieldList = $vals['fieldList']; + } + if (isset($vals['paramKeyPattern'])) { + $this->paramKeyPattern = $vals['paramKeyPattern']; + } + if (isset($vals['excludeParamKeyPattern'])) { + $this->excludeParamKeyPattern = $vals['excludeParamKeyPattern']; + } + } + } + + public function getName() { + return 'GetPartitionsProjectSpec'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 
0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->fieldList = array(); + $_size214 = 0; + $_etype217 = 0; + $xfer += $input->readListBegin($_etype217, $_size214); + for ($_i218 = 0; $_i218 < $_size214; ++$_i218) + { + $elem219 = null; + $xfer += $input->readString($elem219); + $this->fieldList []= $elem219; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->paramKeyPattern); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->excludeParamKeyPattern); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetPartitionsProjectSpec'); + if ($this->fieldList !== null) { + if (!is_array($this->fieldList)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('fieldList', TType::LST, 1); + { + $output->writeListBegin(TType::STRING, count($this->fieldList)); + { + foreach ($this->fieldList as $iter220) + { + $xfer += $output->writeString($iter220); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->paramKeyPattern !== null) { + $xfer += $output->writeFieldBegin('paramKeyPattern', TType::STRING, 2); + $xfer += $output->writeString($this->paramKeyPattern); + $xfer += $output->writeFieldEnd(); + } + if ($this->excludeParamKeyPattern !== null) { + $xfer += $output->writeFieldBegin('excludeParamKeyPattern', TType::BOOL, 3); + $xfer += $output->writeBool($this->excludeParamKeyPattern); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetPartitionsFilterSpec { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tblName = null; + /** + * @var bool + */ + public $withAuth = null; + /** + * @var string + */ + public $user = null; + /** + * @var string[] + */ + public $groupNames = null; + /** + * @var int + */ + public $filterMode = null; + /** + * @var string[] + */ + public $filters = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tblName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'withAuth', + 'type' => TType::BOOL, + ), + 4 => array( + 'var' => 'user', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'groupNames', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 6 => array( + 'var' => 'filterMode', + 'type' => TType::I32, + ), + 7 => array( + 'var' => 'filters', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; + } + if 
(isset($vals['withAuth'])) { + $this->withAuth = $vals['withAuth']; + } + if (isset($vals['user'])) { + $this->user = $vals['user']; + } + if (isset($vals['groupNames'])) { + $this->groupNames = $vals['groupNames']; + } + if (isset($vals['filterMode'])) { + $this->filterMode = $vals['filterMode']; + } + if (isset($vals['filters'])) { + $this->filters = $vals['filters']; + } + } + } + + public function getName() { + return 'GetPartitionsFilterSpec'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->withAuth); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->user); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::LST) { + $this->groupNames = array(); + $_size221 = 0; + $_etype224 = 0; + $xfer += $input->readListBegin($_etype224, $_size221); + for ($_i225 = 0; $_i225 < $_size221; ++$_i225) + { + $elem226 = null; + $xfer += $input->readString($elem226); + $this->groupNames []= $elem226; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->filterMode); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: + if ($ftype == TType::LST) { + $this->filters = array(); + $_size227 = 0; + $_etype230 = 0; + $xfer += $input->readListBegin($_etype230, $_size227); + for ($_i231 = 0; $_i231 < $_size227; ++$_i231) + { + $elem232 = null; + $xfer += $input->readString($elem232); + $this->filters []= $elem232; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetPartitionsFilterSpec'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); + $xfer += $output->writeFieldEnd(); + } + if ($this->withAuth !== null) { + $xfer += $output->writeFieldBegin('withAuth', TType::BOOL, 3); + $xfer += $output->writeBool($this->withAuth); + $xfer += $output->writeFieldEnd(); + } + if ($this->user !== null) { + $xfer += $output->writeFieldBegin('user', TType::STRING, 4); + $xfer += $output->writeString($this->user); + $xfer += $output->writeFieldEnd(); + } + if ($this->groupNames !== null) { + if (!is_array($this->groupNames)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('groupNames', TType::LST, 5); + { + $output->writeListBegin(TType::STRING, 
count($this->groupNames)); + { + foreach ($this->groupNames as $iter233) + { + $xfer += $output->writeString($iter233); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->filterMode !== null) { + $xfer += $output->writeFieldBegin('filterMode', TType::I32, 6); + $xfer += $output->writeI32($this->filterMode); + $xfer += $output->writeFieldEnd(); + } + if ($this->filters !== null) { + if (!is_array($this->filters)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('filters', TType::LST, 7); + { + $output->writeListBegin(TType::STRING, count($this->filters)); + { + foreach ($this->filters as $iter234) + { + $xfer += $output->writeString($iter234); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetPartitionsResponse { + static $_TSPEC; + + /** + * @var \metastore\PartitionSpec[] + */ + public $partitionSpec = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'partitionSpec', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\PartitionSpec', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['partitionSpec'])) { + $this->partitionSpec = $vals['partitionSpec']; + } + } + } + + public function getName() { + return 'GetPartitionsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->partitionSpec = array(); + $_size235 = 0; + $_etype238 = 0; + $xfer += $input->readListBegin($_etype238, $_size235); + for ($_i239 = 0; $_i239 < $_size235; ++$_i239) + { + $elem240 = null; + $elem240 = new \metastore\PartitionSpec(); + $xfer += $elem240->read($input); + $this->partitionSpec []= $elem240; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetPartitionsResponse'); + if ($this->partitionSpec !== null) { + if (!is_array($this->partitionSpec)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('partitionSpec', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->partitionSpec)); + { + foreach ($this->partitionSpec as $iter241) + { + $xfer += $iter241->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetPartitionsRequest { + static $_TSPEC; + + /** + * @var \metastore\GetPartitionsProjectSpec + */ + public $projectionSpec = null; + /** + * @var \metastore\GetPartitionsFilterSpec + */ + public $filterSpec = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'projectionSpec', + 'type' => 
TType::STRUCT, + 'class' => '\metastore\GetPartitionsProjectSpec', + ), + 2 => array( + 'var' => 'filterSpec', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetPartitionsFilterSpec', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['projectionSpec'])) { + $this->projectionSpec = $vals['projectionSpec']; + } + if (isset($vals['filterSpec'])) { + $this->filterSpec = $vals['filterSpec']; + } + } + } + + public function getName() { + return 'GetPartitionsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->projectionSpec = new \metastore\GetPartitionsProjectSpec(); + $xfer += $this->projectionSpec->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->filterSpec = new \metastore\GetPartitionsFilterSpec(); + $xfer += $this->filterSpec->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetPartitionsRequest'); + if ($this->projectionSpec !== null) { + if (!is_object($this->projectionSpec)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('projectionSpec', TType::STRUCT, 1); + $xfer += $this->projectionSpec->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->filterSpec !== null) { + if (!is_object($this->filterSpec)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('filterSpec', TType::STRUCT, 2); + $xfer += $this->filterSpec->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class PartitionWithoutSD { static $_TSPEC; @@ -7763,14 +8397,14 @@ class PartitionWithoutSD { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size214 = 0; - $_etype217 = 0; - $xfer += $input->readListBegin($_etype217, $_size214); - for ($_i218 = 0; $_i218 < $_size214; ++$_i218) + $_size242 = 0; + $_etype245 = 0; + $xfer += $input->readListBegin($_etype245, $_size242); + for ($_i246 = 0; $_i246 < $_size242; ++$_i246) { - $elem219 = null; - $xfer += $input->readString($elem219); - $this->values []= $elem219; + $elem247 = null; + $xfer += $input->readString($elem247); + $this->values []= $elem247; } $xfer += $input->readListEnd(); } else { @@ -7801,17 +8435,17 @@ class PartitionWithoutSD { case 5: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size220 = 0; - $_ktype221 = 0; - $_vtype222 = 0; - $xfer += $input->readMapBegin($_ktype221, $_vtype222, $_size220); - for ($_i224 = 0; $_i224 < $_size220; ++$_i224) + $_size248 = 0; + $_ktype249 = 0; + $_vtype250 = 0; + $xfer += $input->readMapBegin($_ktype249, $_vtype250, $_size248); + for ($_i252 = 0; $_i252 < $_size248; ++$_i252) { - $key225 = ''; - $val226 = ''; - $xfer += $input->readString($key225); - $xfer += $input->readString($val226); - $this->parameters[$key225] = $val226; + $key253 = ''; + $val254 = ''; + $xfer += 
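[Editor's note, not part of the patch] For orientation: a minimal sketch of how the new PHP structs generated above (GetPartitionsProjectSpec, GetPartitionsFilterSpec, GetPartitionsRequest, PartitionFilterMode) might be composed by a client and passed to the new get_partitions_with_specs call whose result struct appears earlier in this diff. All field values, the table name, and the $client handle are illustrative assumptions; the constructors and field names follow the generated code shown above.

<?php
// Illustrative sketch only; values and the $client handle are assumptions, not from the patch.
// Assumes the generated metastore/Types.php and the Thrift PHP runtime are already loaded.
use metastore\GetPartitionsProjectSpec;
use metastore\GetPartitionsFilterSpec;
use metastore\GetPartitionsRequest;
use metastore\PartitionFilterMode;

// Ask only for the partition values and storage location of each partition,
// and drop parameters matching a (hypothetical) key pattern.
$projection = new GetPartitionsProjectSpec(array(
  'fieldList'              => array('values', 'sd.location'),   // hypothetical projection paths
  'paramKeyPattern'        => 'transient_lastDdlTime',          // hypothetical pattern
  'excludeParamKeyPattern' => true,
));

// Restrict the result to named partitions of a hypothetical default.web_logs table.
$filter = new GetPartitionsFilterSpec(array(
  'dbName'     => 'default',
  'tblName'    => 'web_logs',
  'filterMode' => PartitionFilterMode::BY_NAMES,
  'filters'    => array('ds=2018-08-15'),
));

$request = new GetPartitionsRequest(array(
  'projectionSpec' => $projection,
  'filterSpec'     => $filter,
));

// With a connected ThriftHiveMetastore client ($client), the call would presumably be:
// $response = $client->get_partitions_with_specs($request);   // GetPartitionsResponse
// foreach ($response->partitionSpec as $spec) { /* PartitionSpec objects */ }

[End editor's note; the generated diff continues below.]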
$input->readString($key253); + $xfer += $input->readString($val254); + $this->parameters[$key253] = $val254; } $xfer += $input->readMapEnd(); } else { @@ -7847,9 +8481,9 @@ class PartitionWithoutSD { { $output->writeListBegin(TType::STRING, count($this->values)); { - foreach ($this->values as $iter227) + foreach ($this->values as $iter255) { - $xfer += $output->writeString($iter227); + $xfer += $output->writeString($iter255); } } $output->writeListEnd(); @@ -7879,10 +8513,10 @@ class PartitionWithoutSD { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter228 => $viter229) + foreach ($this->parameters as $kiter256 => $viter257) { - $xfer += $output->writeString($kiter228); - $xfer += $output->writeString($viter229); + $xfer += $output->writeString($kiter256); + $xfer += $output->writeString($viter257); } } $output->writeMapEnd(); @@ -7967,15 +8601,15 @@ class PartitionSpecWithSharedSD { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size230 = 0; - $_etype233 = 0; - $xfer += $input->readListBegin($_etype233, $_size230); - for ($_i234 = 0; $_i234 < $_size230; ++$_i234) + $_size258 = 0; + $_etype261 = 0; + $xfer += $input->readListBegin($_etype261, $_size258); + for ($_i262 = 0; $_i262 < $_size258; ++$_i262) { - $elem235 = null; - $elem235 = new \metastore\PartitionWithoutSD(); - $xfer += $elem235->read($input); - $this->partitions []= $elem235; + $elem263 = null; + $elem263 = new \metastore\PartitionWithoutSD(); + $xfer += $elem263->read($input); + $this->partitions []= $elem263; } $xfer += $input->readListEnd(); } else { @@ -8011,9 +8645,9 @@ class PartitionSpecWithSharedSD { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter236) + foreach ($this->partitions as $iter264) { - $xfer += $iter236->write($output); + $xfer += $iter264->write($output); } } $output->writeListEnd(); @@ -8086,15 +8720,15 @@ class PartitionListComposingSpec { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size237 = 0; - $_etype240 = 0; - $xfer += $input->readListBegin($_etype240, $_size237); - for ($_i241 = 0; $_i241 < $_size237; ++$_i241) + $_size265 = 0; + $_etype268 = 0; + $xfer += $input->readListBegin($_etype268, $_size265); + for ($_i269 = 0; $_i269 < $_size265; ++$_i269) { - $elem242 = null; - $elem242 = new \metastore\Partition(); - $xfer += $elem242->read($input); - $this->partitions []= $elem242; + $elem270 = null; + $elem270 = new \metastore\Partition(); + $xfer += $elem270->read($input); + $this->partitions []= $elem270; } $xfer += $input->readListEnd(); } else { @@ -8122,9 +8756,9 @@ class PartitionListComposingSpec { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter243) + foreach ($this->partitions as $iter271) { - $xfer += $iter243->write($output); + $xfer += $iter271->write($output); } } $output->writeListEnd(); @@ -10346,15 +10980,15 @@ class ColumnStatistics { case 2: if ($ftype == TType::LST) { $this->statsObj = array(); - $_size244 = 0; - $_etype247 = 0; - $xfer += $input->readListBegin($_etype247, $_size244); - for ($_i248 = 0; $_i248 < $_size244; ++$_i248) + $_size272 = 0; + $_etype275 = 0; + $xfer += $input->readListBegin($_etype275, $_size272); + for ($_i276 = 0; $_i276 < $_size272; ++$_i276) { - $elem249 = null; - $elem249 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem249->read($input); - $this->statsObj []= $elem249; + $elem277 = null; + $elem277 = new 
\metastore\ColumnStatisticsObj(); + $xfer += $elem277->read($input); + $this->statsObj []= $elem277; } $xfer += $input->readListEnd(); } else { @@ -10397,9 +11031,9 @@ class ColumnStatistics { { $output->writeListBegin(TType::STRUCT, count($this->statsObj)); { - foreach ($this->statsObj as $iter250) + foreach ($this->statsObj as $iter278) { - $xfer += $iter250->write($output); + $xfer += $iter278->write($output); } } $output->writeListEnd(); @@ -10491,15 +11125,15 @@ class AggrStats { case 1: if ($ftype == TType::LST) { $this->colStats = array(); - $_size251 = 0; - $_etype254 = 0; - $xfer += $input->readListBegin($_etype254, $_size251); - for ($_i255 = 0; $_i255 < $_size251; ++$_i255) + $_size279 = 0; + $_etype282 = 0; + $xfer += $input->readListBegin($_etype282, $_size279); + for ($_i283 = 0; $_i283 < $_size279; ++$_i283) { - $elem256 = null; - $elem256 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem256->read($input); - $this->colStats []= $elem256; + $elem284 = null; + $elem284 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem284->read($input); + $this->colStats []= $elem284; } $xfer += $input->readListEnd(); } else { @@ -10541,9 +11175,9 @@ class AggrStats { { $output->writeListBegin(TType::STRUCT, count($this->colStats)); { - foreach ($this->colStats as $iter257) + foreach ($this->colStats as $iter285) { - $xfer += $iter257->write($output); + $xfer += $iter285->write($output); } } $output->writeListEnd(); @@ -10651,15 +11285,15 @@ class SetPartitionsStatsRequest { case 1: if ($ftype == TType::LST) { $this->colStats = array(); - $_size258 = 0; - $_etype261 = 0; - $xfer += $input->readListBegin($_etype261, $_size258); - for ($_i262 = 0; $_i262 < $_size258; ++$_i262) + $_size286 = 0; + $_etype289 = 0; + $xfer += $input->readListBegin($_etype289, $_size286); + for ($_i290 = 0; $_i290 < $_size286; ++$_i290) { - $elem263 = null; - $elem263 = new \metastore\ColumnStatistics(); - $xfer += $elem263->read($input); - $this->colStats []= $elem263; + $elem291 = null; + $elem291 = new \metastore\ColumnStatistics(); + $xfer += $elem291->read($input); + $this->colStats []= $elem291; } $xfer += $input->readListEnd(); } else { @@ -10708,9 +11342,9 @@ class SetPartitionsStatsRequest { { $output->writeListBegin(TType::STRUCT, count($this->colStats)); { - foreach ($this->colStats as $iter264) + foreach ($this->colStats as $iter292) { - $xfer += $iter264->write($output); + $xfer += $iter292->write($output); } } $output->writeListEnd(); @@ -10884,15 +11518,15 @@ class Schema { case 1: if ($ftype == TType::LST) { $this->fieldSchemas = array(); - $_size265 = 0; - $_etype268 = 0; - $xfer += $input->readListBegin($_etype268, $_size265); - for ($_i269 = 0; $_i269 < $_size265; ++$_i269) + $_size293 = 0; + $_etype296 = 0; + $xfer += $input->readListBegin($_etype296, $_size293); + for ($_i297 = 0; $_i297 < $_size293; ++$_i297) { - $elem270 = null; - $elem270 = new \metastore\FieldSchema(); - $xfer += $elem270->read($input); - $this->fieldSchemas []= $elem270; + $elem298 = null; + $elem298 = new \metastore\FieldSchema(); + $xfer += $elem298->read($input); + $this->fieldSchemas []= $elem298; } $xfer += $input->readListEnd(); } else { @@ -10902,17 +11536,17 @@ class Schema { case 2: if ($ftype == TType::MAP) { $this->properties = array(); - $_size271 = 0; - $_ktype272 = 0; - $_vtype273 = 0; - $xfer += $input->readMapBegin($_ktype272, $_vtype273, $_size271); - for ($_i275 = 0; $_i275 < $_size271; ++$_i275) + $_size299 = 0; + $_ktype300 = 0; + $_vtype301 = 0; + $xfer += $input->readMapBegin($_ktype300, 
$_vtype301, $_size299); + for ($_i303 = 0; $_i303 < $_size299; ++$_i303) { - $key276 = ''; - $val277 = ''; - $xfer += $input->readString($key276); - $xfer += $input->readString($val277); - $this->properties[$key276] = $val277; + $key304 = ''; + $val305 = ''; + $xfer += $input->readString($key304); + $xfer += $input->readString($val305); + $this->properties[$key304] = $val305; } $xfer += $input->readMapEnd(); } else { @@ -10940,9 +11574,9 @@ class Schema { { $output->writeListBegin(TType::STRUCT, count($this->fieldSchemas)); { - foreach ($this->fieldSchemas as $iter278) + foreach ($this->fieldSchemas as $iter306) { - $xfer += $iter278->write($output); + $xfer += $iter306->write($output); } } $output->writeListEnd(); @@ -10957,10 +11591,10 @@ class Schema { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter279 => $viter280) + foreach ($this->properties as $kiter307 => $viter308) { - $xfer += $output->writeString($kiter279); - $xfer += $output->writeString($viter280); + $xfer += $output->writeString($kiter307); + $xfer += $output->writeString($viter308); } } $output->writeMapEnd(); @@ -11028,17 +11662,17 @@ class EnvironmentContext { case 1: if ($ftype == TType::MAP) { $this->properties = array(); - $_size281 = 0; - $_ktype282 = 0; - $_vtype283 = 0; - $xfer += $input->readMapBegin($_ktype282, $_vtype283, $_size281); - for ($_i285 = 0; $_i285 < $_size281; ++$_i285) + $_size309 = 0; + $_ktype310 = 0; + $_vtype311 = 0; + $xfer += $input->readMapBegin($_ktype310, $_vtype311, $_size309); + for ($_i313 = 0; $_i313 < $_size309; ++$_i313) { - $key286 = ''; - $val287 = ''; - $xfer += $input->readString($key286); - $xfer += $input->readString($val287); - $this->properties[$key286] = $val287; + $key314 = ''; + $val315 = ''; + $xfer += $input->readString($key314); + $xfer += $input->readString($val315); + $this->properties[$key314] = $val315; } $xfer += $input->readMapEnd(); } else { @@ -11066,10 +11700,10 @@ class EnvironmentContext { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter288 => $viter289) + foreach ($this->properties as $kiter316 => $viter317) { - $xfer += $output->writeString($kiter288); - $xfer += $output->writeString($viter289); + $xfer += $output->writeString($kiter316); + $xfer += $output->writeString($viter317); } } $output->writeMapEnd(); @@ -11255,15 +11889,15 @@ class PrimaryKeysResponse { case 1: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size290 = 0; - $_etype293 = 0; - $xfer += $input->readListBegin($_etype293, $_size290); - for ($_i294 = 0; $_i294 < $_size290; ++$_i294) + $_size318 = 0; + $_etype321 = 0; + $xfer += $input->readListBegin($_etype321, $_size318); + for ($_i322 = 0; $_i322 < $_size318; ++$_i322) { - $elem295 = null; - $elem295 = new \metastore\SQLPrimaryKey(); - $xfer += $elem295->read($input); - $this->primaryKeys []= $elem295; + $elem323 = null; + $elem323 = new \metastore\SQLPrimaryKey(); + $xfer += $elem323->read($input); + $this->primaryKeys []= $elem323; } $xfer += $input->readListEnd(); } else { @@ -11291,9 +11925,9 @@ class PrimaryKeysResponse { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter296) + foreach ($this->primaryKeys as $iter324) { - $xfer += $iter296->write($output); + $xfer += $iter324->write($output); } } $output->writeListEnd(); @@ -11525,15 +12159,15 @@ class ForeignKeysResponse { case 1: if ($ftype == TType::LST) { 
$this->foreignKeys = array(); - $_size297 = 0; - $_etype300 = 0; - $xfer += $input->readListBegin($_etype300, $_size297); - for ($_i301 = 0; $_i301 < $_size297; ++$_i301) + $_size325 = 0; + $_etype328 = 0; + $xfer += $input->readListBegin($_etype328, $_size325); + for ($_i329 = 0; $_i329 < $_size325; ++$_i329) { - $elem302 = null; - $elem302 = new \metastore\SQLForeignKey(); - $xfer += $elem302->read($input); - $this->foreignKeys []= $elem302; + $elem330 = null; + $elem330 = new \metastore\SQLForeignKey(); + $xfer += $elem330->read($input); + $this->foreignKeys []= $elem330; } $xfer += $input->readListEnd(); } else { @@ -11561,9 +12195,9 @@ class ForeignKeysResponse { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter303) + foreach ($this->foreignKeys as $iter331) { - $xfer += $iter303->write($output); + $xfer += $iter331->write($output); } } $output->writeListEnd(); @@ -11749,15 +12383,15 @@ class UniqueConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size304 = 0; - $_etype307 = 0; - $xfer += $input->readListBegin($_etype307, $_size304); - for ($_i308 = 0; $_i308 < $_size304; ++$_i308) + $_size332 = 0; + $_etype335 = 0; + $xfer += $input->readListBegin($_etype335, $_size332); + for ($_i336 = 0; $_i336 < $_size332; ++$_i336) { - $elem309 = null; - $elem309 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem309->read($input); - $this->uniqueConstraints []= $elem309; + $elem337 = null; + $elem337 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem337->read($input); + $this->uniqueConstraints []= $elem337; } $xfer += $input->readListEnd(); } else { @@ -11785,9 +12419,9 @@ class UniqueConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter310) + foreach ($this->uniqueConstraints as $iter338) { - $xfer += $iter310->write($output); + $xfer += $iter338->write($output); } } $output->writeListEnd(); @@ -11973,15 +12607,15 @@ class NotNullConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size311 = 0; - $_etype314 = 0; - $xfer += $input->readListBegin($_etype314, $_size311); - for ($_i315 = 0; $_i315 < $_size311; ++$_i315) + $_size339 = 0; + $_etype342 = 0; + $xfer += $input->readListBegin($_etype342, $_size339); + for ($_i343 = 0; $_i343 < $_size339; ++$_i343) { - $elem316 = null; - $elem316 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem316->read($input); - $this->notNullConstraints []= $elem316; + $elem344 = null; + $elem344 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem344->read($input); + $this->notNullConstraints []= $elem344; } $xfer += $input->readListEnd(); } else { @@ -12009,9 +12643,9 @@ class NotNullConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter317) + foreach ($this->notNullConstraints as $iter345) { - $xfer += $iter317->write($output); + $xfer += $iter345->write($output); } } $output->writeListEnd(); @@ -12197,15 +12831,15 @@ class DefaultConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size318 = 0; - $_etype321 = 0; - $xfer += $input->readListBegin($_etype321, $_size318); - for ($_i322 = 0; $_i322 < $_size318; ++$_i322) + $_size346 = 0; + $_etype349 = 0; + $xfer += $input->readListBegin($_etype349, $_size346); + for ($_i350 = 0; $_i350 < $_size346; ++$_i350) { - 
$elem323 = null; - $elem323 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem323->read($input); - $this->defaultConstraints []= $elem323; + $elem351 = null; + $elem351 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem351->read($input); + $this->defaultConstraints []= $elem351; } $xfer += $input->readListEnd(); } else { @@ -12233,9 +12867,9 @@ class DefaultConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter324) + foreach ($this->defaultConstraints as $iter352) { - $xfer += $iter324->write($output); + $xfer += $iter352->write($output); } } $output->writeListEnd(); @@ -12421,15 +13055,15 @@ class CheckConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size325 = 0; - $_etype328 = 0; - $xfer += $input->readListBegin($_etype328, $_size325); - for ($_i329 = 0; $_i329 < $_size325; ++$_i329) + $_size353 = 0; + $_etype356 = 0; + $xfer += $input->readListBegin($_etype356, $_size353); + for ($_i357 = 0; $_i357 < $_size353; ++$_i357) { - $elem330 = null; - $elem330 = new \metastore\SQLCheckConstraint(); - $xfer += $elem330->read($input); - $this->checkConstraints []= $elem330; + $elem358 = null; + $elem358 = new \metastore\SQLCheckConstraint(); + $xfer += $elem358->read($input); + $this->checkConstraints []= $elem358; } $xfer += $input->readListEnd(); } else { @@ -12457,9 +13091,9 @@ class CheckConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter331) + foreach ($this->checkConstraints as $iter359) { - $xfer += $iter331->write($output); + $xfer += $iter359->write($output); } } $output->writeListEnd(); @@ -12668,15 +13302,15 @@ class AddPrimaryKeyRequest { case 1: if ($ftype == TType::LST) { $this->primaryKeyCols = array(); - $_size332 = 0; - $_etype335 = 0; - $xfer += $input->readListBegin($_etype335, $_size332); - for ($_i336 = 0; $_i336 < $_size332; ++$_i336) + $_size360 = 0; + $_etype363 = 0; + $xfer += $input->readListBegin($_etype363, $_size360); + for ($_i364 = 0; $_i364 < $_size360; ++$_i364) { - $elem337 = null; - $elem337 = new \metastore\SQLPrimaryKey(); - $xfer += $elem337->read($input); - $this->primaryKeyCols []= $elem337; + $elem365 = null; + $elem365 = new \metastore\SQLPrimaryKey(); + $xfer += $elem365->read($input); + $this->primaryKeyCols []= $elem365; } $xfer += $input->readListEnd(); } else { @@ -12704,9 +13338,9 @@ class AddPrimaryKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeyCols)); { - foreach ($this->primaryKeyCols as $iter338) + foreach ($this->primaryKeyCols as $iter366) { - $xfer += $iter338->write($output); + $xfer += $iter366->write($output); } } $output->writeListEnd(); @@ -12771,15 +13405,15 @@ class AddForeignKeyRequest { case 1: if ($ftype == TType::LST) { $this->foreignKeyCols = array(); - $_size339 = 0; - $_etype342 = 0; - $xfer += $input->readListBegin($_etype342, $_size339); - for ($_i343 = 0; $_i343 < $_size339; ++$_i343) + $_size367 = 0; + $_etype370 = 0; + $xfer += $input->readListBegin($_etype370, $_size367); + for ($_i371 = 0; $_i371 < $_size367; ++$_i371) { - $elem344 = null; - $elem344 = new \metastore\SQLForeignKey(); - $xfer += $elem344->read($input); - $this->foreignKeyCols []= $elem344; + $elem372 = null; + $elem372 = new \metastore\SQLForeignKey(); + $xfer += $elem372->read($input); + $this->foreignKeyCols []= $elem372; } $xfer += $input->readListEnd(); } else { @@ -12807,9 +13441,9 
@@ class AddForeignKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeyCols)); { - foreach ($this->foreignKeyCols as $iter345) + foreach ($this->foreignKeyCols as $iter373) { - $xfer += $iter345->write($output); + $xfer += $iter373->write($output); } } $output->writeListEnd(); @@ -12874,15 +13508,15 @@ class AddUniqueConstraintRequest { case 1: if ($ftype == TType::LST) { $this->uniqueConstraintCols = array(); - $_size346 = 0; - $_etype349 = 0; - $xfer += $input->readListBegin($_etype349, $_size346); - for ($_i350 = 0; $_i350 < $_size346; ++$_i350) + $_size374 = 0; + $_etype377 = 0; + $xfer += $input->readListBegin($_etype377, $_size374); + for ($_i378 = 0; $_i378 < $_size374; ++$_i378) { - $elem351 = null; - $elem351 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem351->read($input); - $this->uniqueConstraintCols []= $elem351; + $elem379 = null; + $elem379 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem379->read($input); + $this->uniqueConstraintCols []= $elem379; } $xfer += $input->readListEnd(); } else { @@ -12910,9 +13544,9 @@ class AddUniqueConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraintCols)); { - foreach ($this->uniqueConstraintCols as $iter352) + foreach ($this->uniqueConstraintCols as $iter380) { - $xfer += $iter352->write($output); + $xfer += $iter380->write($output); } } $output->writeListEnd(); @@ -12977,15 +13611,15 @@ class AddNotNullConstraintRequest { case 1: if ($ftype == TType::LST) { $this->notNullConstraintCols = array(); - $_size353 = 0; - $_etype356 = 0; - $xfer += $input->readListBegin($_etype356, $_size353); - for ($_i357 = 0; $_i357 < $_size353; ++$_i357) + $_size381 = 0; + $_etype384 = 0; + $xfer += $input->readListBegin($_etype384, $_size381); + for ($_i385 = 0; $_i385 < $_size381; ++$_i385) { - $elem358 = null; - $elem358 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem358->read($input); - $this->notNullConstraintCols []= $elem358; + $elem386 = null; + $elem386 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem386->read($input); + $this->notNullConstraintCols []= $elem386; } $xfer += $input->readListEnd(); } else { @@ -13013,9 +13647,9 @@ class AddNotNullConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraintCols)); { - foreach ($this->notNullConstraintCols as $iter359) + foreach ($this->notNullConstraintCols as $iter387) { - $xfer += $iter359->write($output); + $xfer += $iter387->write($output); } } $output->writeListEnd(); @@ -13080,15 +13714,15 @@ class AddDefaultConstraintRequest { case 1: if ($ftype == TType::LST) { $this->defaultConstraintCols = array(); - $_size360 = 0; - $_etype363 = 0; - $xfer += $input->readListBegin($_etype363, $_size360); - for ($_i364 = 0; $_i364 < $_size360; ++$_i364) + $_size388 = 0; + $_etype391 = 0; + $xfer += $input->readListBegin($_etype391, $_size388); + for ($_i392 = 0; $_i392 < $_size388; ++$_i392) { - $elem365 = null; - $elem365 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem365->read($input); - $this->defaultConstraintCols []= $elem365; + $elem393 = null; + $elem393 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem393->read($input); + $this->defaultConstraintCols []= $elem393; } $xfer += $input->readListEnd(); } else { @@ -13116,9 +13750,9 @@ class AddDefaultConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraintCols)); { - foreach ($this->defaultConstraintCols as $iter366) + foreach ($this->defaultConstraintCols as $iter394) { - $xfer 
+= $iter366->write($output); + $xfer += $iter394->write($output); } } $output->writeListEnd(); @@ -13183,15 +13817,15 @@ class AddCheckConstraintRequest { case 1: if ($ftype == TType::LST) { $this->checkConstraintCols = array(); - $_size367 = 0; - $_etype370 = 0; - $xfer += $input->readListBegin($_etype370, $_size367); - for ($_i371 = 0; $_i371 < $_size367; ++$_i371) + $_size395 = 0; + $_etype398 = 0; + $xfer += $input->readListBegin($_etype398, $_size395); + for ($_i399 = 0; $_i399 < $_size395; ++$_i399) { - $elem372 = null; - $elem372 = new \metastore\SQLCheckConstraint(); - $xfer += $elem372->read($input); - $this->checkConstraintCols []= $elem372; + $elem400 = null; + $elem400 = new \metastore\SQLCheckConstraint(); + $xfer += $elem400->read($input); + $this->checkConstraintCols []= $elem400; } $xfer += $input->readListEnd(); } else { @@ -13219,9 +13853,9 @@ class AddCheckConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraintCols)); { - foreach ($this->checkConstraintCols as $iter373) + foreach ($this->checkConstraintCols as $iter401) { - $xfer += $iter373->write($output); + $xfer += $iter401->write($output); } } $output->writeListEnd(); @@ -13297,15 +13931,15 @@ class PartitionsByExprResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size374 = 0; - $_etype377 = 0; - $xfer += $input->readListBegin($_etype377, $_size374); - for ($_i378 = 0; $_i378 < $_size374; ++$_i378) + $_size402 = 0; + $_etype405 = 0; + $xfer += $input->readListBegin($_etype405, $_size402); + for ($_i406 = 0; $_i406 < $_size402; ++$_i406) { - $elem379 = null; - $elem379 = new \metastore\Partition(); - $xfer += $elem379->read($input); - $this->partitions []= $elem379; + $elem407 = null; + $elem407 = new \metastore\Partition(); + $xfer += $elem407->read($input); + $this->partitions []= $elem407; } $xfer += $input->readListEnd(); } else { @@ -13340,9 +13974,9 @@ class PartitionsByExprResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter380) + foreach ($this->partitions as $iter408) { - $xfer += $iter380->write($output); + $xfer += $iter408->write($output); } } $output->writeListEnd(); @@ -13613,15 +14247,15 @@ class TableStatsResult { case 1: if ($ftype == TType::LST) { $this->tableStats = array(); - $_size381 = 0; - $_etype384 = 0; - $xfer += $input->readListBegin($_etype384, $_size381); - for ($_i385 = 0; $_i385 < $_size381; ++$_i385) + $_size409 = 0; + $_etype412 = 0; + $xfer += $input->readListBegin($_etype412, $_size409); + for ($_i413 = 0; $_i413 < $_size409; ++$_i413) { - $elem386 = null; - $elem386 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem386->read($input); - $this->tableStats []= $elem386; + $elem414 = null; + $elem414 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem414->read($input); + $this->tableStats []= $elem414; } $xfer += $input->readListEnd(); } else { @@ -13656,9 +14290,9 @@ class TableStatsResult { { $output->writeListBegin(TType::STRUCT, count($this->tableStats)); { - foreach ($this->tableStats as $iter387) + foreach ($this->tableStats as $iter415) { - $xfer += $iter387->write($output); + $xfer += $iter415->write($output); } } $output->writeListEnd(); @@ -13747,28 +14381,28 @@ class PartitionsStatsResult { case 1: if ($ftype == TType::MAP) { $this->partStats = array(); - $_size388 = 0; - $_ktype389 = 0; - $_vtype390 = 0; - $xfer += $input->readMapBegin($_ktype389, $_vtype390, $_size388); - for ($_i392 = 0; $_i392 < $_size388; ++$_i392) + $_size416 = 0; + 
$_ktype417 = 0; + $_vtype418 = 0; + $xfer += $input->readMapBegin($_ktype417, $_vtype418, $_size416); + for ($_i420 = 0; $_i420 < $_size416; ++$_i420) { - $key393 = ''; - $val394 = array(); - $xfer += $input->readString($key393); - $val394 = array(); - $_size395 = 0; - $_etype398 = 0; - $xfer += $input->readListBegin($_etype398, $_size395); - for ($_i399 = 0; $_i399 < $_size395; ++$_i399) + $key421 = ''; + $val422 = array(); + $xfer += $input->readString($key421); + $val422 = array(); + $_size423 = 0; + $_etype426 = 0; + $xfer += $input->readListBegin($_etype426, $_size423); + for ($_i427 = 0; $_i427 < $_size423; ++$_i427) { - $elem400 = null; - $elem400 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem400->read($input); - $val394 []= $elem400; + $elem428 = null; + $elem428 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem428->read($input); + $val422 []= $elem428; } $xfer += $input->readListEnd(); - $this->partStats[$key393] = $val394; + $this->partStats[$key421] = $val422; } $xfer += $input->readMapEnd(); } else { @@ -13803,15 +14437,15 @@ class PartitionsStatsResult { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->partStats)); { - foreach ($this->partStats as $kiter401 => $viter402) + foreach ($this->partStats as $kiter429 => $viter430) { - $xfer += $output->writeString($kiter401); + $xfer += $output->writeString($kiter429); { - $output->writeListBegin(TType::STRUCT, count($viter402)); + $output->writeListBegin(TType::STRUCT, count($viter430)); { - foreach ($viter402 as $iter403) + foreach ($viter430 as $iter431) { - $xfer += $iter403->write($output); + $xfer += $iter431->write($output); } } $output->writeListEnd(); @@ -13942,14 +14576,14 @@ class TableStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size404 = 0; - $_etype407 = 0; - $xfer += $input->readListBegin($_etype407, $_size404); - for ($_i408 = 0; $_i408 < $_size404; ++$_i408) + $_size432 = 0; + $_etype435 = 0; + $xfer += $input->readListBegin($_etype435, $_size432); + for ($_i436 = 0; $_i436 < $_size432; ++$_i436) { - $elem409 = null; - $xfer += $input->readString($elem409); - $this->colNames []= $elem409; + $elem437 = null; + $xfer += $input->readString($elem437); + $this->colNames []= $elem437; } $xfer += $input->readListEnd(); } else { @@ -14001,9 +14635,9 @@ class TableStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter410) + foreach ($this->colNames as $iter438) { - $xfer += $output->writeString($iter410); + $xfer += $output->writeString($iter438); } } $output->writeListEnd(); @@ -14150,14 +14784,14 @@ class PartitionsStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size411 = 0; - $_etype414 = 0; - $xfer += $input->readListBegin($_etype414, $_size411); - for ($_i415 = 0; $_i415 < $_size411; ++$_i415) + $_size439 = 0; + $_etype442 = 0; + $xfer += $input->readListBegin($_etype442, $_size439); + for ($_i443 = 0; $_i443 < $_size439; ++$_i443) { - $elem416 = null; - $xfer += $input->readString($elem416); - $this->colNames []= $elem416; + $elem444 = null; + $xfer += $input->readString($elem444); + $this->colNames []= $elem444; } $xfer += $input->readListEnd(); } else { @@ -14167,14 +14801,14 @@ class PartitionsStatsRequest { case 4: if ($ftype == TType::LST) { $this->partNames = array(); - $_size417 = 0; - $_etype420 = 0; - $xfer += $input->readListBegin($_etype420, $_size417); - for ($_i421 = 0; $_i421 < $_size417; ++$_i421) + $_size445 = 0; + $_etype448 = 0; + $xfer 
+= $input->readListBegin($_etype448, $_size445); + for ($_i449 = 0; $_i449 < $_size445; ++$_i449) { - $elem422 = null; - $xfer += $input->readString($elem422); - $this->partNames []= $elem422; + $elem450 = null; + $xfer += $input->readString($elem450); + $this->partNames []= $elem450; } $xfer += $input->readListEnd(); } else { @@ -14226,9 +14860,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter423) + foreach ($this->colNames as $iter451) { - $xfer += $output->writeString($iter423); + $xfer += $output->writeString($iter451); } } $output->writeListEnd(); @@ -14243,9 +14877,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter424) + foreach ($this->partNames as $iter452) { - $xfer += $output->writeString($iter424); + $xfer += $output->writeString($iter452); } } $output->writeListEnd(); @@ -14331,15 +14965,15 @@ class AddPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size425 = 0; - $_etype428 = 0; - $xfer += $input->readListBegin($_etype428, $_size425); - for ($_i429 = 0; $_i429 < $_size425; ++$_i429) + $_size453 = 0; + $_etype456 = 0; + $xfer += $input->readListBegin($_etype456, $_size453); + for ($_i457 = 0; $_i457 < $_size453; ++$_i457) { - $elem430 = null; - $elem430 = new \metastore\Partition(); - $xfer += $elem430->read($input); - $this->partitions []= $elem430; + $elem458 = null; + $elem458 = new \metastore\Partition(); + $xfer += $elem458->read($input); + $this->partitions []= $elem458; } $xfer += $input->readListEnd(); } else { @@ -14374,9 +15008,9 @@ class AddPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter431) + foreach ($this->partitions as $iter459) { - $xfer += $iter431->write($output); + $xfer += $iter459->write($output); } } $output->writeListEnd(); @@ -14526,15 +15160,15 @@ class AddPartitionsRequest { case 3: if ($ftype == TType::LST) { $this->parts = array(); - $_size432 = 0; - $_etype435 = 0; - $xfer += $input->readListBegin($_etype435, $_size432); - for ($_i436 = 0; $_i436 < $_size432; ++$_i436) + $_size460 = 0; + $_etype463 = 0; + $xfer += $input->readListBegin($_etype463, $_size460); + for ($_i464 = 0; $_i464 < $_size460; ++$_i464) { - $elem437 = null; - $elem437 = new \metastore\Partition(); - $xfer += $elem437->read($input); - $this->parts []= $elem437; + $elem465 = null; + $elem465 = new \metastore\Partition(); + $xfer += $elem465->read($input); + $this->parts []= $elem465; } $xfer += $input->readListEnd(); } else { @@ -14600,9 +15234,9 @@ class AddPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->parts)); { - foreach ($this->parts as $iter438) + foreach ($this->parts as $iter466) { - $xfer += $iter438->write($output); + $xfer += $iter466->write($output); } } $output->writeListEnd(); @@ -14687,15 +15321,15 @@ class DropPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size439 = 0; - $_etype442 = 0; - $xfer += $input->readListBegin($_etype442, $_size439); - for ($_i443 = 0; $_i443 < $_size439; ++$_i443) + $_size467 = 0; + $_etype470 = 0; + $xfer += $input->readListBegin($_etype470, $_size467); + for ($_i471 = 0; $_i471 < $_size467; ++$_i471) { - $elem444 = null; - $elem444 = new \metastore\Partition(); - $xfer += $elem444->read($input); - $this->partitions []= $elem444; + $elem472 = null; + $elem472 = new 
\metastore\Partition(); + $xfer += $elem472->read($input); + $this->partitions []= $elem472; } $xfer += $input->readListEnd(); } else { @@ -14723,9 +15357,9 @@ class DropPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter445) + foreach ($this->partitions as $iter473) { - $xfer += $iter445->write($output); + $xfer += $iter473->write($output); } } $output->writeListEnd(); @@ -14903,14 +15537,14 @@ class RequestPartsSpec { case 1: if ($ftype == TType::LST) { $this->names = array(); - $_size446 = 0; - $_etype449 = 0; - $xfer += $input->readListBegin($_etype449, $_size446); - for ($_i450 = 0; $_i450 < $_size446; ++$_i450) + $_size474 = 0; + $_etype477 = 0; + $xfer += $input->readListBegin($_etype477, $_size474); + for ($_i478 = 0; $_i478 < $_size474; ++$_i478) { - $elem451 = null; - $xfer += $input->readString($elem451); - $this->names []= $elem451; + $elem479 = null; + $xfer += $input->readString($elem479); + $this->names []= $elem479; } $xfer += $input->readListEnd(); } else { @@ -14920,15 +15554,15 @@ class RequestPartsSpec { case 2: if ($ftype == TType::LST) { $this->exprs = array(); - $_size452 = 0; - $_etype455 = 0; - $xfer += $input->readListBegin($_etype455, $_size452); - for ($_i456 = 0; $_i456 < $_size452; ++$_i456) + $_size480 = 0; + $_etype483 = 0; + $xfer += $input->readListBegin($_etype483, $_size480); + for ($_i484 = 0; $_i484 < $_size480; ++$_i484) { - $elem457 = null; - $elem457 = new \metastore\DropPartitionsExpr(); - $xfer += $elem457->read($input); - $this->exprs []= $elem457; + $elem485 = null; + $elem485 = new \metastore\DropPartitionsExpr(); + $xfer += $elem485->read($input); + $this->exprs []= $elem485; } $xfer += $input->readListEnd(); } else { @@ -14956,9 +15590,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter458) + foreach ($this->names as $iter486) { - $xfer += $output->writeString($iter458); + $xfer += $output->writeString($iter486); } } $output->writeListEnd(); @@ -14973,9 +15607,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRUCT, count($this->exprs)); { - foreach ($this->exprs as $iter459) + foreach ($this->exprs as $iter487) { - $xfer += $iter459->write($output); + $xfer += $iter487->write($output); } } $output->writeListEnd(); @@ -15416,15 +16050,15 @@ class PartitionValuesRequest { case 3: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size460 = 0; - $_etype463 = 0; - $xfer += $input->readListBegin($_etype463, $_size460); - for ($_i464 = 0; $_i464 < $_size460; ++$_i464) + $_size488 = 0; + $_etype491 = 0; + $xfer += $input->readListBegin($_etype491, $_size488); + for ($_i492 = 0; $_i492 < $_size488; ++$_i492) { - $elem465 = null; - $elem465 = new \metastore\FieldSchema(); - $xfer += $elem465->read($input); - $this->partitionKeys []= $elem465; + $elem493 = null; + $elem493 = new \metastore\FieldSchema(); + $xfer += $elem493->read($input); + $this->partitionKeys []= $elem493; } $xfer += $input->readListEnd(); } else { @@ -15448,15 +16082,15 @@ class PartitionValuesRequest { case 6: if ($ftype == TType::LST) { $this->partitionOrder = array(); - $_size466 = 0; - $_etype469 = 0; - $xfer += $input->readListBegin($_etype469, $_size466); - for ($_i470 = 0; $_i470 < $_size466; ++$_i470) + $_size494 = 0; + $_etype497 = 0; + $xfer += $input->readListBegin($_etype497, $_size494); + for ($_i498 = 0; $_i498 < $_size494; ++$_i498) { - $elem471 = null; - $elem471 = new 
\metastore\FieldSchema(); - $xfer += $elem471->read($input); - $this->partitionOrder []= $elem471; + $elem499 = null; + $elem499 = new \metastore\FieldSchema(); + $xfer += $elem499->read($input); + $this->partitionOrder []= $elem499; } $xfer += $input->readListEnd(); } else { @@ -15515,9 +16149,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter472) + foreach ($this->partitionKeys as $iter500) { - $xfer += $iter472->write($output); + $xfer += $iter500->write($output); } } $output->writeListEnd(); @@ -15542,9 +16176,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionOrder)); { - foreach ($this->partitionOrder as $iter473) + foreach ($this->partitionOrder as $iter501) { - $xfer += $iter473->write($output); + $xfer += $iter501->write($output); } } $output->writeListEnd(); @@ -15623,14 +16257,14 @@ class PartitionValuesRow { case 1: if ($ftype == TType::LST) { $this->row = array(); - $_size474 = 0; - $_etype477 = 0; - $xfer += $input->readListBegin($_etype477, $_size474); - for ($_i478 = 0; $_i478 < $_size474; ++$_i478) + $_size502 = 0; + $_etype505 = 0; + $xfer += $input->readListBegin($_etype505, $_size502); + for ($_i506 = 0; $_i506 < $_size502; ++$_i506) { - $elem479 = null; - $xfer += $input->readString($elem479); - $this->row []= $elem479; + $elem507 = null; + $xfer += $input->readString($elem507); + $this->row []= $elem507; } $xfer += $input->readListEnd(); } else { @@ -15658,9 +16292,9 @@ class PartitionValuesRow { { $output->writeListBegin(TType::STRING, count($this->row)); { - foreach ($this->row as $iter480) + foreach ($this->row as $iter508) { - $xfer += $output->writeString($iter480); + $xfer += $output->writeString($iter508); } } $output->writeListEnd(); @@ -15725,15 +16359,15 @@ class PartitionValuesResponse { case 1: if ($ftype == TType::LST) { $this->partitionValues = array(); - $_size481 = 0; - $_etype484 = 0; - $xfer += $input->readListBegin($_etype484, $_size481); - for ($_i485 = 0; $_i485 < $_size481; ++$_i485) + $_size509 = 0; + $_etype512 = 0; + $xfer += $input->readListBegin($_etype512, $_size509); + for ($_i513 = 0; $_i513 < $_size509; ++$_i513) { - $elem486 = null; - $elem486 = new \metastore\PartitionValuesRow(); - $xfer += $elem486->read($input); - $this->partitionValues []= $elem486; + $elem514 = null; + $elem514 = new \metastore\PartitionValuesRow(); + $xfer += $elem514->read($input); + $this->partitionValues []= $elem514; } $xfer += $input->readListEnd(); } else { @@ -15761,9 +16395,9 @@ class PartitionValuesResponse { { $output->writeListBegin(TType::STRUCT, count($this->partitionValues)); { - foreach ($this->partitionValues as $iter487) + foreach ($this->partitionValues as $iter515) { - $xfer += $iter487->write($output); + $xfer += $iter515->write($output); } } $output->writeListEnd(); @@ -16063,15 +16697,15 @@ class Function { case 8: if ($ftype == TType::LST) { $this->resourceUris = array(); - $_size488 = 0; - $_etype491 = 0; - $xfer += $input->readListBegin($_etype491, $_size488); - for ($_i492 = 0; $_i492 < $_size488; ++$_i492) + $_size516 = 0; + $_etype519 = 0; + $xfer += $input->readListBegin($_etype519, $_size516); + for ($_i520 = 0; $_i520 < $_size516; ++$_i520) { - $elem493 = null; - $elem493 = new \metastore\ResourceUri(); - $xfer += $elem493->read($input); - $this->resourceUris []= $elem493; + $elem521 = null; + $elem521 = new \metastore\ResourceUri(); + $xfer += $elem521->read($input); + 
$this->resourceUris []= $elem521; } $xfer += $input->readListEnd(); } else { @@ -16141,9 +16775,9 @@ class Function { { $output->writeListBegin(TType::STRUCT, count($this->resourceUris)); { - foreach ($this->resourceUris as $iter494) + foreach ($this->resourceUris as $iter522) { - $xfer += $iter494->write($output); + $xfer += $iter522->write($output); } } $output->writeListEnd(); @@ -16490,15 +17124,15 @@ class GetOpenTxnsInfoResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size495 = 0; - $_etype498 = 0; - $xfer += $input->readListBegin($_etype498, $_size495); - for ($_i499 = 0; $_i499 < $_size495; ++$_i499) + $_size523 = 0; + $_etype526 = 0; + $xfer += $input->readListBegin($_etype526, $_size523); + for ($_i527 = 0; $_i527 < $_size523; ++$_i527) { - $elem500 = null; - $elem500 = new \metastore\TxnInfo(); - $xfer += $elem500->read($input); - $this->open_txns []= $elem500; + $elem528 = null; + $elem528 = new \metastore\TxnInfo(); + $xfer += $elem528->read($input); + $this->open_txns []= $elem528; } $xfer += $input->readListEnd(); } else { @@ -16531,9 +17165,9 @@ class GetOpenTxnsInfoResponse { { $output->writeListBegin(TType::STRUCT, count($this->open_txns)); { - foreach ($this->open_txns as $iter501) + foreach ($this->open_txns as $iter529) { - $xfer += $iter501->write($output); + $xfer += $iter529->write($output); } } $output->writeListEnd(); @@ -16637,14 +17271,14 @@ class GetOpenTxnsResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size502 = 0; - $_etype505 = 0; - $xfer += $input->readListBegin($_etype505, $_size502); - for ($_i506 = 0; $_i506 < $_size502; ++$_i506) + $_size530 = 0; + $_etype533 = 0; + $xfer += $input->readListBegin($_etype533, $_size530); + for ($_i534 = 0; $_i534 < $_size530; ++$_i534) { - $elem507 = null; - $xfer += $input->readI64($elem507); - $this->open_txns []= $elem507; + $elem535 = null; + $xfer += $input->readI64($elem535); + $this->open_txns []= $elem535; } $xfer += $input->readListEnd(); } else { @@ -16691,9 +17325,9 @@ class GetOpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter508) + foreach ($this->open_txns as $iter536) { - $xfer += $output->writeI64($iter508); + $xfer += $output->writeI64($iter536); } } $output->writeListEnd(); @@ -16857,14 +17491,14 @@ class OpenTxnRequest { case 6: if ($ftype == TType::LST) { $this->replSrcTxnIds = array(); - $_size509 = 0; - $_etype512 = 0; - $xfer += $input->readListBegin($_etype512, $_size509); - for ($_i513 = 0; $_i513 < $_size509; ++$_i513) + $_size537 = 0; + $_etype540 = 0; + $xfer += $input->readListBegin($_etype540, $_size537); + for ($_i541 = 0; $_i541 < $_size537; ++$_i541) { - $elem514 = null; - $xfer += $input->readI64($elem514); - $this->replSrcTxnIds []= $elem514; + $elem542 = null; + $xfer += $input->readI64($elem542); + $this->replSrcTxnIds []= $elem542; } $xfer += $input->readListEnd(); } else { @@ -16917,9 +17551,9 @@ class OpenTxnRequest { { $output->writeListBegin(TType::I64, count($this->replSrcTxnIds)); { - foreach ($this->replSrcTxnIds as $iter515) + foreach ($this->replSrcTxnIds as $iter543) { - $xfer += $output->writeI64($iter515); + $xfer += $output->writeI64($iter543); } } $output->writeListEnd(); @@ -16983,14 +17617,14 @@ class OpenTxnsResponse { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size516 = 0; - $_etype519 = 0; - $xfer += $input->readListBegin($_etype519, $_size516); - for ($_i520 = 0; $_i520 < $_size516; ++$_i520) + $_size544 = 0; + 
$_etype547 = 0; + $xfer += $input->readListBegin($_etype547, $_size544); + for ($_i548 = 0; $_i548 < $_size544; ++$_i548) { - $elem521 = null; - $xfer += $input->readI64($elem521); - $this->txn_ids []= $elem521; + $elem549 = null; + $xfer += $input->readI64($elem549); + $this->txn_ids []= $elem549; } $xfer += $input->readListEnd(); } else { @@ -17018,9 +17652,9 @@ class OpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter522) + foreach ($this->txn_ids as $iter550) { - $xfer += $output->writeI64($iter522); + $xfer += $output->writeI64($iter550); } } $output->writeListEnd(); @@ -17182,14 +17816,14 @@ class AbortTxnsRequest { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size523 = 0; - $_etype526 = 0; - $xfer += $input->readListBegin($_etype526, $_size523); - for ($_i527 = 0; $_i527 < $_size523; ++$_i527) + $_size551 = 0; + $_etype554 = 0; + $xfer += $input->readListBegin($_etype554, $_size551); + for ($_i555 = 0; $_i555 < $_size551; ++$_i555) { - $elem528 = null; - $xfer += $input->readI64($elem528); - $this->txn_ids []= $elem528; + $elem556 = null; + $xfer += $input->readI64($elem556); + $this->txn_ids []= $elem556; } $xfer += $input->readListEnd(); } else { @@ -17217,9 +17851,9 @@ class AbortTxnsRequest { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter529) + foreach ($this->txn_ids as $iter557) { - $xfer += $output->writeI64($iter529); + $xfer += $output->writeI64($iter557); } } $output->writeListEnd(); @@ -17320,15 +17954,15 @@ class CommitTxnRequest { case 3: if ($ftype == TType::LST) { $this->writeEventInfos = array(); - $_size530 = 0; - $_etype533 = 0; - $xfer += $input->readListBegin($_etype533, $_size530); - for ($_i534 = 0; $_i534 < $_size530; ++$_i534) + $_size558 = 0; + $_etype561 = 0; + $xfer += $input->readListBegin($_etype561, $_size558); + for ($_i562 = 0; $_i562 < $_size558; ++$_i562) { - $elem535 = null; - $elem535 = new \metastore\WriteEventInfo(); - $xfer += $elem535->read($input); - $this->writeEventInfos []= $elem535; + $elem563 = null; + $elem563 = new \metastore\WriteEventInfo(); + $xfer += $elem563->read($input); + $this->writeEventInfos []= $elem563; } $xfer += $input->readListEnd(); } else { @@ -17366,9 +18000,9 @@ class CommitTxnRequest { { $output->writeListBegin(TType::STRUCT, count($this->writeEventInfos)); { - foreach ($this->writeEventInfos as $iter536) + foreach ($this->writeEventInfos as $iter564) { - $xfer += $iter536->write($output); + $xfer += $iter564->write($output); } } $output->writeListEnd(); @@ -17735,14 +18369,14 @@ class ReplTblWriteIdStateRequest { case 6: if ($ftype == TType::LST) { $this->partNames = array(); - $_size537 = 0; - $_etype540 = 0; - $xfer += $input->readListBegin($_etype540, $_size537); - for ($_i541 = 0; $_i541 < $_size537; ++$_i541) + $_size565 = 0; + $_etype568 = 0; + $xfer += $input->readListBegin($_etype568, $_size565); + for ($_i569 = 0; $_i569 < $_size565; ++$_i569) { - $elem542 = null; - $xfer += $input->readString($elem542); - $this->partNames []= $elem542; + $elem570 = null; + $xfer += $input->readString($elem570); + $this->partNames []= $elem570; } $xfer += $input->readListEnd(); } else { @@ -17795,9 +18429,9 @@ class ReplTblWriteIdStateRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter543) + foreach ($this->partNames as $iter571) { - $xfer += $output->writeString($iter543); + $xfer += $output->writeString($iter571); } } 
$output->writeListEnd(); @@ -17872,14 +18506,14 @@ class GetValidWriteIdsRequest { case 1: if ($ftype == TType::LST) { $this->fullTableNames = array(); - $_size544 = 0; - $_etype547 = 0; - $xfer += $input->readListBegin($_etype547, $_size544); - for ($_i548 = 0; $_i548 < $_size544; ++$_i548) + $_size572 = 0; + $_etype575 = 0; + $xfer += $input->readListBegin($_etype575, $_size572); + for ($_i576 = 0; $_i576 < $_size572; ++$_i576) { - $elem549 = null; - $xfer += $input->readString($elem549); - $this->fullTableNames []= $elem549; + $elem577 = null; + $xfer += $input->readString($elem577); + $this->fullTableNames []= $elem577; } $xfer += $input->readListEnd(); } else { @@ -17914,9 +18548,9 @@ class GetValidWriteIdsRequest { { $output->writeListBegin(TType::STRING, count($this->fullTableNames)); { - foreach ($this->fullTableNames as $iter550) + foreach ($this->fullTableNames as $iter578) { - $xfer += $output->writeString($iter550); + $xfer += $output->writeString($iter578); } } $output->writeListEnd(); @@ -18043,14 +18677,14 @@ class TableValidWriteIds { case 3: if ($ftype == TType::LST) { $this->invalidWriteIds = array(); - $_size551 = 0; - $_etype554 = 0; - $xfer += $input->readListBegin($_etype554, $_size551); - for ($_i555 = 0; $_i555 < $_size551; ++$_i555) + $_size579 = 0; + $_etype582 = 0; + $xfer += $input->readListBegin($_etype582, $_size579); + for ($_i583 = 0; $_i583 < $_size579; ++$_i583) { - $elem556 = null; - $xfer += $input->readI64($elem556); - $this->invalidWriteIds []= $elem556; + $elem584 = null; + $xfer += $input->readI64($elem584); + $this->invalidWriteIds []= $elem584; } $xfer += $input->readListEnd(); } else { @@ -18102,9 +18736,9 @@ class TableValidWriteIds { { $output->writeListBegin(TType::I64, count($this->invalidWriteIds)); { - foreach ($this->invalidWriteIds as $iter557) + foreach ($this->invalidWriteIds as $iter585) { - $xfer += $output->writeI64($iter557); + $xfer += $output->writeI64($iter585); } } $output->writeListEnd(); @@ -18179,15 +18813,15 @@ class GetValidWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->tblValidWriteIds = array(); - $_size558 = 0; - $_etype561 = 0; - $xfer += $input->readListBegin($_etype561, $_size558); - for ($_i562 = 0; $_i562 < $_size558; ++$_i562) + $_size586 = 0; + $_etype589 = 0; + $xfer += $input->readListBegin($_etype589, $_size586); + for ($_i590 = 0; $_i590 < $_size586; ++$_i590) { - $elem563 = null; - $elem563 = new \metastore\TableValidWriteIds(); - $xfer += $elem563->read($input); - $this->tblValidWriteIds []= $elem563; + $elem591 = null; + $elem591 = new \metastore\TableValidWriteIds(); + $xfer += $elem591->read($input); + $this->tblValidWriteIds []= $elem591; } $xfer += $input->readListEnd(); } else { @@ -18215,9 +18849,9 @@ class GetValidWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->tblValidWriteIds)); { - foreach ($this->tblValidWriteIds as $iter564) + foreach ($this->tblValidWriteIds as $iter592) { - $xfer += $iter564->write($output); + $xfer += $iter592->write($output); } } $output->writeListEnd(); @@ -18344,14 +18978,14 @@ class AllocateTableWriteIdsRequest { case 3: if ($ftype == TType::LST) { $this->txnIds = array(); - $_size565 = 0; - $_etype568 = 0; - $xfer += $input->readListBegin($_etype568, $_size565); - for ($_i569 = 0; $_i569 < $_size565; ++$_i569) + $_size593 = 0; + $_etype596 = 0; + $xfer += $input->readListBegin($_etype596, $_size593); + for ($_i597 = 0; $_i597 < $_size593; ++$_i597) { - $elem570 = null; - $xfer += $input->readI64($elem570); - $this->txnIds []= 
$elem570; + $elem598 = null; + $xfer += $input->readI64($elem598); + $this->txnIds []= $elem598; } $xfer += $input->readListEnd(); } else { @@ -18368,15 +19002,15 @@ class AllocateTableWriteIdsRequest { case 5: if ($ftype == TType::LST) { $this->srcTxnToWriteIdList = array(); - $_size571 = 0; - $_etype574 = 0; - $xfer += $input->readListBegin($_etype574, $_size571); - for ($_i575 = 0; $_i575 < $_size571; ++$_i575) + $_size599 = 0; + $_etype602 = 0; + $xfer += $input->readListBegin($_etype602, $_size599); + for ($_i603 = 0; $_i603 < $_size599; ++$_i603) { - $elem576 = null; - $elem576 = new \metastore\TxnToWriteId(); - $xfer += $elem576->read($input); - $this->srcTxnToWriteIdList []= $elem576; + $elem604 = null; + $elem604 = new \metastore\TxnToWriteId(); + $xfer += $elem604->read($input); + $this->srcTxnToWriteIdList []= $elem604; } $xfer += $input->readListEnd(); } else { @@ -18414,9 +19048,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::I64, count($this->txnIds)); { - foreach ($this->txnIds as $iter577) + foreach ($this->txnIds as $iter605) { - $xfer += $output->writeI64($iter577); + $xfer += $output->writeI64($iter605); } } $output->writeListEnd(); @@ -18436,9 +19070,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::STRUCT, count($this->srcTxnToWriteIdList)); { - foreach ($this->srcTxnToWriteIdList as $iter578) + foreach ($this->srcTxnToWriteIdList as $iter606) { - $xfer += $iter578->write($output); + $xfer += $iter606->write($output); } } $output->writeListEnd(); @@ -18601,15 +19235,15 @@ class AllocateTableWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->txnToWriteIds = array(); - $_size579 = 0; - $_etype582 = 0; - $xfer += $input->readListBegin($_etype582, $_size579); - for ($_i583 = 0; $_i583 < $_size579; ++$_i583) + $_size607 = 0; + $_etype610 = 0; + $xfer += $input->readListBegin($_etype610, $_size607); + for ($_i611 = 0; $_i611 < $_size607; ++$_i611) { - $elem584 = null; - $elem584 = new \metastore\TxnToWriteId(); - $xfer += $elem584->read($input); - $this->txnToWriteIds []= $elem584; + $elem612 = null; + $elem612 = new \metastore\TxnToWriteId(); + $xfer += $elem612->read($input); + $this->txnToWriteIds []= $elem612; } $xfer += $input->readListEnd(); } else { @@ -18637,9 +19271,9 @@ class AllocateTableWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); { - foreach ($this->txnToWriteIds as $iter585) + foreach ($this->txnToWriteIds as $iter613) { - $xfer += $iter585->write($output); + $xfer += $iter613->write($output); } } $output->writeListEnd(); @@ -18984,15 +19618,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size586 = 0; - $_etype589 = 0; - $xfer += $input->readListBegin($_etype589, $_size586); - for ($_i590 = 0; $_i590 < $_size586; ++$_i590) + $_size614 = 0; + $_etype617 = 0; + $xfer += $input->readListBegin($_etype617, $_size614); + for ($_i618 = 0; $_i618 < $_size614; ++$_i618) { - $elem591 = null; - $elem591 = new \metastore\LockComponent(); - $xfer += $elem591->read($input); - $this->component []= $elem591; + $elem619 = null; + $elem619 = new \metastore\LockComponent(); + $xfer += $elem619->read($input); + $this->component []= $elem619; } $xfer += $input->readListEnd(); } else { @@ -19048,9 +19682,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter592) + foreach ($this->component as $iter620) { - $xfer += $iter592->write($output); + $xfer += 
$iter620->write($output); } } $output->writeListEnd(); @@ -19993,15 +20627,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size593 = 0; - $_etype596 = 0; - $xfer += $input->readListBegin($_etype596, $_size593); - for ($_i597 = 0; $_i597 < $_size593; ++$_i597) + $_size621 = 0; + $_etype624 = 0; + $xfer += $input->readListBegin($_etype624, $_size621); + for ($_i625 = 0; $_i625 < $_size621; ++$_i625) { - $elem598 = null; - $elem598 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem598->read($input); - $this->locks []= $elem598; + $elem626 = null; + $elem626 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem626->read($input); + $this->locks []= $elem626; } $xfer += $input->readListEnd(); } else { @@ -20029,9 +20663,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter599) + foreach ($this->locks as $iter627) { - $xfer += $iter599->write($output); + $xfer += $iter627->write($output); } } $output->writeListEnd(); @@ -20306,17 +20940,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size600 = 0; - $_etype603 = 0; - $xfer += $input->readSetBegin($_etype603, $_size600); - for ($_i604 = 0; $_i604 < $_size600; ++$_i604) + $_size628 = 0; + $_etype631 = 0; + $xfer += $input->readSetBegin($_etype631, $_size628); + for ($_i632 = 0; $_i632 < $_size628; ++$_i632) { - $elem605 = null; - $xfer += $input->readI64($elem605); - if (is_scalar($elem605)) { - $this->aborted[$elem605] = true; + $elem633 = null; + $xfer += $input->readI64($elem633); + if (is_scalar($elem633)) { + $this->aborted[$elem633] = true; } else { - $this->aborted []= $elem605; + $this->aborted []= $elem633; } } $xfer += $input->readSetEnd(); @@ -20327,17 +20961,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size606 = 0; - $_etype609 = 0; - $xfer += $input->readSetBegin($_etype609, $_size606); - for ($_i610 = 0; $_i610 < $_size606; ++$_i610) + $_size634 = 0; + $_etype637 = 0; + $xfer += $input->readSetBegin($_etype637, $_size634); + for ($_i638 = 0; $_i638 < $_size634; ++$_i638) { - $elem611 = null; - $xfer += $input->readI64($elem611); - if (is_scalar($elem611)) { - $this->nosuch[$elem611] = true; + $elem639 = null; + $xfer += $input->readI64($elem639); + if (is_scalar($elem639)) { + $this->nosuch[$elem639] = true; } else { - $this->nosuch []= $elem611; + $this->nosuch []= $elem639; } } $xfer += $input->readSetEnd(); @@ -20366,12 +21000,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter612 => $iter613) + foreach ($this->aborted as $iter640 => $iter641) { - if (is_scalar($iter613)) { - $xfer += $output->writeI64($iter612); + if (is_scalar($iter641)) { + $xfer += $output->writeI64($iter640); } else { - $xfer += $output->writeI64($iter613); + $xfer += $output->writeI64($iter641); } } } @@ -20387,12 +21021,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter614 => $iter615) + foreach ($this->nosuch as $iter642 => $iter643) { - if (is_scalar($iter615)) { - $xfer += $output->writeI64($iter614); + if (is_scalar($iter643)) { + $xfer += $output->writeI64($iter642); } else { - $xfer += $output->writeI64($iter615); + $xfer += $output->writeI64($iter643); } } } @@ -20551,17 +21185,17 @@ class CompactionRequest { case 6: if ($ftype 
== TType::MAP) { $this->properties = array(); - $_size616 = 0; - $_ktype617 = 0; - $_vtype618 = 0; - $xfer += $input->readMapBegin($_ktype617, $_vtype618, $_size616); - for ($_i620 = 0; $_i620 < $_size616; ++$_i620) + $_size644 = 0; + $_ktype645 = 0; + $_vtype646 = 0; + $xfer += $input->readMapBegin($_ktype645, $_vtype646, $_size644); + for ($_i648 = 0; $_i648 < $_size644; ++$_i648) { - $key621 = ''; - $val622 = ''; - $xfer += $input->readString($key621); - $xfer += $input->readString($val622); - $this->properties[$key621] = $val622; + $key649 = ''; + $val650 = ''; + $xfer += $input->readString($key649); + $xfer += $input->readString($val650); + $this->properties[$key649] = $val650; } $xfer += $input->readMapEnd(); } else { @@ -20614,10 +21248,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter623 => $viter624) + foreach ($this->properties as $kiter651 => $viter652) { - $xfer += $output->writeString($kiter623); - $xfer += $output->writeString($viter624); + $xfer += $output->writeString($kiter651); + $xfer += $output->writeString($viter652); } } $output->writeMapEnd(); @@ -21204,15 +21838,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size625 = 0; - $_etype628 = 0; - $xfer += $input->readListBegin($_etype628, $_size625); - for ($_i629 = 0; $_i629 < $_size625; ++$_i629) + $_size653 = 0; + $_etype656 = 0; + $xfer += $input->readListBegin($_etype656, $_size653); + for ($_i657 = 0; $_i657 < $_size653; ++$_i657) { - $elem630 = null; - $elem630 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem630->read($input); - $this->compacts []= $elem630; + $elem658 = null; + $elem658 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem658->read($input); + $this->compacts []= $elem658; } $xfer += $input->readListEnd(); } else { @@ -21240,9 +21874,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter631) + foreach ($this->compacts as $iter659) { - $xfer += $iter631->write($output); + $xfer += $iter659->write($output); } } $output->writeListEnd(); @@ -21389,14 +22023,14 @@ class AddDynamicPartitions { case 5: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size632 = 0; - $_etype635 = 0; - $xfer += $input->readListBegin($_etype635, $_size632); - for ($_i636 = 0; $_i636 < $_size632; ++$_i636) + $_size660 = 0; + $_etype663 = 0; + $xfer += $input->readListBegin($_etype663, $_size660); + for ($_i664 = 0; $_i664 < $_size660; ++$_i664) { - $elem637 = null; - $xfer += $input->readString($elem637); - $this->partitionnames []= $elem637; + $elem665 = null; + $xfer += $input->readString($elem665); + $this->partitionnames []= $elem665; } $xfer += $input->readListEnd(); } else { @@ -21451,9 +22085,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter638) + foreach ($this->partitionnames as $iter666) { - $xfer += $output->writeString($iter638); + $xfer += $output->writeString($iter666); } } $output->writeListEnd(); @@ -21788,17 +22422,17 @@ class CreationMetadata { case 4: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size639 = 0; - $_etype642 = 0; - $xfer += $input->readSetBegin($_etype642, $_size639); - for ($_i643 = 0; $_i643 < $_size639; ++$_i643) + $_size667 = 0; + $_etype670 = 0; + $xfer += $input->readSetBegin($_etype670, $_size667); 
+ for ($_i671 = 0; $_i671 < $_size667; ++$_i671) { - $elem644 = null; - $xfer += $input->readString($elem644); - if (is_scalar($elem644)) { - $this->tablesUsed[$elem644] = true; + $elem672 = null; + $xfer += $input->readString($elem672); + if (is_scalar($elem672)) { + $this->tablesUsed[$elem672] = true; } else { - $this->tablesUsed []= $elem644; + $this->tablesUsed []= $elem672; } } $xfer += $input->readSetEnd(); @@ -21856,12 +22490,12 @@ class CreationMetadata { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter645 => $iter646) + foreach ($this->tablesUsed as $iter673 => $iter674) { - if (is_scalar($iter646)) { - $xfer += $output->writeString($iter645); + if (is_scalar($iter674)) { + $xfer += $output->writeString($iter673); } else { - $xfer += $output->writeString($iter646); + $xfer += $output->writeString($iter674); } } } @@ -22271,15 +22905,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size647 = 0; - $_etype650 = 0; - $xfer += $input->readListBegin($_etype650, $_size647); - for ($_i651 = 0; $_i651 < $_size647; ++$_i651) + $_size675 = 0; + $_etype678 = 0; + $xfer += $input->readListBegin($_etype678, $_size675); + for ($_i679 = 0; $_i679 < $_size675; ++$_i679) { - $elem652 = null; - $elem652 = new \metastore\NotificationEvent(); - $xfer += $elem652->read($input); - $this->events []= $elem652; + $elem680 = null; + $elem680 = new \metastore\NotificationEvent(); + $xfer += $elem680->read($input); + $this->events []= $elem680; } $xfer += $input->readListEnd(); } else { @@ -22307,9 +22941,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter653) + foreach ($this->events as $iter681) { - $xfer += $iter653->write($output); + $xfer += $iter681->write($output); } } $output->writeListEnd(); @@ -22692,14 +23326,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size654 = 0; - $_etype657 = 0; - $xfer += $input->readListBegin($_etype657, $_size654); - for ($_i658 = 0; $_i658 < $_size654; ++$_i658) + $_size682 = 0; + $_etype685 = 0; + $xfer += $input->readListBegin($_etype685, $_size682); + for ($_i686 = 0; $_i686 < $_size682; ++$_i686) { - $elem659 = null; - $xfer += $input->readString($elem659); - $this->filesAdded []= $elem659; + $elem687 = null; + $xfer += $input->readString($elem687); + $this->filesAdded []= $elem687; } $xfer += $input->readListEnd(); } else { @@ -22709,14 +23343,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size660 = 0; - $_etype663 = 0; - $xfer += $input->readListBegin($_etype663, $_size660); - for ($_i664 = 0; $_i664 < $_size660; ++$_i664) + $_size688 = 0; + $_etype691 = 0; + $xfer += $input->readListBegin($_etype691, $_size688); + for ($_i692 = 0; $_i692 < $_size688; ++$_i692) { - $elem665 = null; - $xfer += $input->readString($elem665); - $this->filesAddedChecksum []= $elem665; + $elem693 = null; + $xfer += $input->readString($elem693); + $this->filesAddedChecksum []= $elem693; } $xfer += $input->readListEnd(); } else { @@ -22726,14 +23360,14 @@ class InsertEventRequestData { case 4: if ($ftype == TType::LST) { $this->subDirectoryList = array(); - $_size666 = 0; - $_etype669 = 0; - $xfer += $input->readListBegin($_etype669, $_size666); - for ($_i670 = 0; $_i670 < $_size666; ++$_i670) + $_size694 = 0; + $_etype697 = 0; + $xfer += 
$input->readListBegin($_etype697, $_size694); + for ($_i698 = 0; $_i698 < $_size694; ++$_i698) { - $elem671 = null; - $xfer += $input->readString($elem671); - $this->subDirectoryList []= $elem671; + $elem699 = null; + $xfer += $input->readString($elem699); + $this->subDirectoryList []= $elem699; } $xfer += $input->readListEnd(); } else { @@ -22766,9 +23400,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter672) + foreach ($this->filesAdded as $iter700) { - $xfer += $output->writeString($iter672); + $xfer += $output->writeString($iter700); } } $output->writeListEnd(); @@ -22783,9 +23417,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter673) + foreach ($this->filesAddedChecksum as $iter701) { - $xfer += $output->writeString($iter673); + $xfer += $output->writeString($iter701); } } $output->writeListEnd(); @@ -22800,9 +23434,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->subDirectoryList)); { - foreach ($this->subDirectoryList as $iter674) + foreach ($this->subDirectoryList as $iter702) { - $xfer += $output->writeString($iter674); + $xfer += $output->writeString($iter702); } } $output->writeListEnd(); @@ -23031,14 +23665,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size675 = 0; - $_etype678 = 0; - $xfer += $input->readListBegin($_etype678, $_size675); - for ($_i679 = 0; $_i679 < $_size675; ++$_i679) + $_size703 = 0; + $_etype706 = 0; + $xfer += $input->readListBegin($_etype706, $_size703); + for ($_i707 = 0; $_i707 < $_size703; ++$_i707) { - $elem680 = null; - $xfer += $input->readString($elem680); - $this->partitionVals []= $elem680; + $elem708 = null; + $xfer += $input->readString($elem708); + $this->partitionVals []= $elem708; } $xfer += $input->readListEnd(); } else { @@ -23096,9 +23730,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter681) + foreach ($this->partitionVals as $iter709) { - $xfer += $output->writeString($iter681); + $xfer += $output->writeString($iter709); } } $output->writeListEnd(); @@ -23309,14 +23943,14 @@ class WriteNotificationLogRequest { case 6: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size682 = 0; - $_etype685 = 0; - $xfer += $input->readListBegin($_etype685, $_size682); - for ($_i686 = 0; $_i686 < $_size682; ++$_i686) + $_size710 = 0; + $_etype713 = 0; + $xfer += $input->readListBegin($_etype713, $_size710); + for ($_i714 = 0; $_i714 < $_size710; ++$_i714) { - $elem687 = null; - $xfer += $input->readString($elem687); - $this->partitionVals []= $elem687; + $elem715 = null; + $xfer += $input->readString($elem715); + $this->partitionVals []= $elem715; } $xfer += $input->readListEnd(); } else { @@ -23372,9 +24006,9 @@ class WriteNotificationLogRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter688) + foreach ($this->partitionVals as $iter716) { - $xfer += $output->writeString($iter688); + $xfer += $output->writeString($iter716); } } $output->writeListEnd(); @@ -23602,18 +24236,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size689 = 0; - $_ktype690 = 0; - $_vtype691 = 0; - $xfer += $input->readMapBegin($_ktype690, $_vtype691, 
$_size689); - for ($_i693 = 0; $_i693 < $_size689; ++$_i693) + $_size717 = 0; + $_ktype718 = 0; + $_vtype719 = 0; + $xfer += $input->readMapBegin($_ktype718, $_vtype719, $_size717); + for ($_i721 = 0; $_i721 < $_size717; ++$_i721) { - $key694 = 0; - $val695 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key694); - $val695 = new \metastore\MetadataPpdResult(); - $xfer += $val695->read($input); - $this->metadata[$key694] = $val695; + $key722 = 0; + $val723 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key722); + $val723 = new \metastore\MetadataPpdResult(); + $xfer += $val723->read($input); + $this->metadata[$key722] = $val723; } $xfer += $input->readMapEnd(); } else { @@ -23648,10 +24282,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter696 => $viter697) + foreach ($this->metadata as $kiter724 => $viter725) { - $xfer += $output->writeI64($kiter696); - $xfer += $viter697->write($output); + $xfer += $output->writeI64($kiter724); + $xfer += $viter725->write($output); } } $output->writeMapEnd(); @@ -23753,14 +24387,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size698 = 0; - $_etype701 = 0; - $xfer += $input->readListBegin($_etype701, $_size698); - for ($_i702 = 0; $_i702 < $_size698; ++$_i702) + $_size726 = 0; + $_etype729 = 0; + $xfer += $input->readListBegin($_etype729, $_size726); + for ($_i730 = 0; $_i730 < $_size726; ++$_i730) { - $elem703 = null; - $xfer += $input->readI64($elem703); - $this->fileIds []= $elem703; + $elem731 = null; + $xfer += $input->readI64($elem731); + $this->fileIds []= $elem731; } $xfer += $input->readListEnd(); } else { @@ -23809,9 +24443,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter704) + foreach ($this->fileIds as $iter732) { - $xfer += $output->writeI64($iter704); + $xfer += $output->writeI64($iter732); } } $output->writeListEnd(); @@ -23905,17 +24539,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size705 = 0; - $_ktype706 = 0; - $_vtype707 = 0; - $xfer += $input->readMapBegin($_ktype706, $_vtype707, $_size705); - for ($_i709 = 0; $_i709 < $_size705; ++$_i709) + $_size733 = 0; + $_ktype734 = 0; + $_vtype735 = 0; + $xfer += $input->readMapBegin($_ktype734, $_vtype735, $_size733); + for ($_i737 = 0; $_i737 < $_size733; ++$_i737) { - $key710 = 0; - $val711 = ''; - $xfer += $input->readI64($key710); - $xfer += $input->readString($val711); - $this->metadata[$key710] = $val711; + $key738 = 0; + $val739 = ''; + $xfer += $input->readI64($key738); + $xfer += $input->readString($val739); + $this->metadata[$key738] = $val739; } $xfer += $input->readMapEnd(); } else { @@ -23950,10 +24584,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter712 => $viter713) + foreach ($this->metadata as $kiter740 => $viter741) { - $xfer += $output->writeI64($kiter712); - $xfer += $output->writeString($viter713); + $xfer += $output->writeI64($kiter740); + $xfer += $output->writeString($viter741); } } $output->writeMapEnd(); @@ -24022,14 +24656,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size714 = 0; - $_etype717 = 0; - $xfer += $input->readListBegin($_etype717, $_size714); - for 
($_i718 = 0; $_i718 < $_size714; ++$_i718) + $_size742 = 0; + $_etype745 = 0; + $xfer += $input->readListBegin($_etype745, $_size742); + for ($_i746 = 0; $_i746 < $_size742; ++$_i746) { - $elem719 = null; - $xfer += $input->readI64($elem719); - $this->fileIds []= $elem719; + $elem747 = null; + $xfer += $input->readI64($elem747); + $this->fileIds []= $elem747; } $xfer += $input->readListEnd(); } else { @@ -24057,9 +24691,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter720) + foreach ($this->fileIds as $iter748) { - $xfer += $output->writeI64($iter720); + $xfer += $output->writeI64($iter748); } } $output->writeListEnd(); @@ -24199,14 +24833,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size721 = 0; - $_etype724 = 0; - $xfer += $input->readListBegin($_etype724, $_size721); - for ($_i725 = 0; $_i725 < $_size721; ++$_i725) + $_size749 = 0; + $_etype752 = 0; + $xfer += $input->readListBegin($_etype752, $_size749); + for ($_i753 = 0; $_i753 < $_size749; ++$_i753) { - $elem726 = null; - $xfer += $input->readI64($elem726); - $this->fileIds []= $elem726; + $elem754 = null; + $xfer += $input->readI64($elem754); + $this->fileIds []= $elem754; } $xfer += $input->readListEnd(); } else { @@ -24216,14 +24850,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size727 = 0; - $_etype730 = 0; - $xfer += $input->readListBegin($_etype730, $_size727); - for ($_i731 = 0; $_i731 < $_size727; ++$_i731) + $_size755 = 0; + $_etype758 = 0; + $xfer += $input->readListBegin($_etype758, $_size755); + for ($_i759 = 0; $_i759 < $_size755; ++$_i759) { - $elem732 = null; - $xfer += $input->readString($elem732); - $this->metadata []= $elem732; + $elem760 = null; + $xfer += $input->readString($elem760); + $this->metadata []= $elem760; } $xfer += $input->readListEnd(); } else { @@ -24258,9 +24892,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter733) + foreach ($this->fileIds as $iter761) { - $xfer += $output->writeI64($iter733); + $xfer += $output->writeI64($iter761); } } $output->writeListEnd(); @@ -24275,9 +24909,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter734) + foreach ($this->metadata as $iter762) { - $xfer += $output->writeString($iter734); + $xfer += $output->writeString($iter762); } } $output->writeListEnd(); @@ -24396,14 +25030,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size735 = 0; - $_etype738 = 0; - $xfer += $input->readListBegin($_etype738, $_size735); - for ($_i739 = 0; $_i739 < $_size735; ++$_i739) + $_size763 = 0; + $_etype766 = 0; + $xfer += $input->readListBegin($_etype766, $_size763); + for ($_i767 = 0; $_i767 < $_size763; ++$_i767) { - $elem740 = null; - $xfer += $input->readI64($elem740); - $this->fileIds []= $elem740; + $elem768 = null; + $xfer += $input->readI64($elem768); + $this->fileIds []= $elem768; } $xfer += $input->readListEnd(); } else { @@ -24431,9 +25065,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter741) + foreach ($this->fileIds as $iter769) { - $xfer += $output->writeI64($iter741); + $xfer += $output->writeI64($iter769); } } $output->writeListEnd(); @@ -24717,15 +25351,15 @@ 
class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size742 = 0; - $_etype745 = 0; - $xfer += $input->readListBegin($_etype745, $_size742); - for ($_i746 = 0; $_i746 < $_size742; ++$_i746) + $_size770 = 0; + $_etype773 = 0; + $xfer += $input->readListBegin($_etype773, $_size770); + for ($_i774 = 0; $_i774 < $_size770; ++$_i774) { - $elem747 = null; - $elem747 = new \metastore\Function(); - $xfer += $elem747->read($input); - $this->functions []= $elem747; + $elem775 = null; + $elem775 = new \metastore\Function(); + $xfer += $elem775->read($input); + $this->functions []= $elem775; } $xfer += $input->readListEnd(); } else { @@ -24753,9 +25387,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter748) + foreach ($this->functions as $iter776) { - $xfer += $iter748->write($output); + $xfer += $iter776->write($output); } } $output->writeListEnd(); @@ -24819,14 +25453,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size749 = 0; - $_etype752 = 0; - $xfer += $input->readListBegin($_etype752, $_size749); - for ($_i753 = 0; $_i753 < $_size749; ++$_i753) + $_size777 = 0; + $_etype780 = 0; + $xfer += $input->readListBegin($_etype780, $_size777); + for ($_i781 = 0; $_i781 < $_size777; ++$_i781) { - $elem754 = null; - $xfer += $input->readI32($elem754); - $this->values []= $elem754; + $elem782 = null; + $xfer += $input->readI32($elem782); + $this->values []= $elem782; } $xfer += $input->readListEnd(); } else { @@ -24854,9 +25488,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter755) + foreach ($this->values as $iter783) { - $xfer += $output->writeI32($iter755); + $xfer += $output->writeI32($iter783); } } $output->writeListEnd(); @@ -25236,14 +25870,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size756 = 0; - $_etype759 = 0; - $xfer += $input->readListBegin($_etype759, $_size756); - for ($_i760 = 0; $_i760 < $_size756; ++$_i760) + $_size784 = 0; + $_etype787 = 0; + $xfer += $input->readListBegin($_etype787, $_size784); + for ($_i788 = 0; $_i788 < $_size784; ++$_i788) { - $elem761 = null; - $xfer += $input->readString($elem761); - $this->tblNames []= $elem761; + $elem789 = null; + $xfer += $input->readString($elem789); + $this->tblNames []= $elem789; } $xfer += $input->readListEnd(); } else { @@ -25291,9 +25925,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter762) + foreach ($this->tblNames as $iter790) { - $xfer += $output->writeString($iter762); + $xfer += $output->writeString($iter790); } } $output->writeListEnd(); @@ -25371,15 +26005,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size763 = 0; - $_etype766 = 0; - $xfer += $input->readListBegin($_etype766, $_size763); - for ($_i767 = 0; $_i767 < $_size763; ++$_i767) + $_size791 = 0; + $_etype794 = 0; + $xfer += $input->readListBegin($_etype794, $_size791); + for ($_i795 = 0; $_i795 < $_size791; ++$_i795) { - $elem768 = null; - $elem768 = new \metastore\Table(); - $xfer += $elem768->read($input); - $this->tables []= $elem768; + $elem796 = null; + $elem796 = new \metastore\Table(); + $xfer += $elem796->read($input); + $this->tables []= $elem796; } $xfer += $input->readListEnd(); } else { @@ -25407,9 +26041,9 @@ class 
GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter769) + foreach ($this->tables as $iter797) { - $xfer += $iter769->write($output); + $xfer += $iter797->write($output); } } $output->writeListEnd(); @@ -27055,15 +27689,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size770 = 0; - $_etype773 = 0; - $xfer += $input->readListBegin($_etype773, $_size770); - for ($_i774 = 0; $_i774 < $_size770; ++$_i774) + $_size798 = 0; + $_etype801 = 0; + $xfer += $input->readListBegin($_etype801, $_size798); + for ($_i802 = 0; $_i802 < $_size798; ++$_i802) { - $elem775 = null; - $elem775 = new \metastore\WMPool(); - $xfer += $elem775->read($input); - $this->pools []= $elem775; + $elem803 = null; + $elem803 = new \metastore\WMPool(); + $xfer += $elem803->read($input); + $this->pools []= $elem803; } $xfer += $input->readListEnd(); } else { @@ -27073,15 +27707,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size776 = 0; - $_etype779 = 0; - $xfer += $input->readListBegin($_etype779, $_size776); - for ($_i780 = 0; $_i780 < $_size776; ++$_i780) + $_size804 = 0; + $_etype807 = 0; + $xfer += $input->readListBegin($_etype807, $_size804); + for ($_i808 = 0; $_i808 < $_size804; ++$_i808) { - $elem781 = null; - $elem781 = new \metastore\WMMapping(); - $xfer += $elem781->read($input); - $this->mappings []= $elem781; + $elem809 = null; + $elem809 = new \metastore\WMMapping(); + $xfer += $elem809->read($input); + $this->mappings []= $elem809; } $xfer += $input->readListEnd(); } else { @@ -27091,15 +27725,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size782 = 0; - $_etype785 = 0; - $xfer += $input->readListBegin($_etype785, $_size782); - for ($_i786 = 0; $_i786 < $_size782; ++$_i786) + $_size810 = 0; + $_etype813 = 0; + $xfer += $input->readListBegin($_etype813, $_size810); + for ($_i814 = 0; $_i814 < $_size810; ++$_i814) { - $elem787 = null; - $elem787 = new \metastore\WMTrigger(); - $xfer += $elem787->read($input); - $this->triggers []= $elem787; + $elem815 = null; + $elem815 = new \metastore\WMTrigger(); + $xfer += $elem815->read($input); + $this->triggers []= $elem815; } $xfer += $input->readListEnd(); } else { @@ -27109,15 +27743,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size788 = 0; - $_etype791 = 0; - $xfer += $input->readListBegin($_etype791, $_size788); - for ($_i792 = 0; $_i792 < $_size788; ++$_i792) + $_size816 = 0; + $_etype819 = 0; + $xfer += $input->readListBegin($_etype819, $_size816); + for ($_i820 = 0; $_i820 < $_size816; ++$_i820) { - $elem793 = null; - $elem793 = new \metastore\WMPoolTrigger(); - $xfer += $elem793->read($input); - $this->poolTriggers []= $elem793; + $elem821 = null; + $elem821 = new \metastore\WMPoolTrigger(); + $xfer += $elem821->read($input); + $this->poolTriggers []= $elem821; } $xfer += $input->readListEnd(); } else { @@ -27153,9 +27787,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter794) + foreach ($this->pools as $iter822) { - $xfer += $iter794->write($output); + $xfer += $iter822->write($output); } } $output->writeListEnd(); @@ -27170,9 +27804,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter795) + foreach ($this->mappings as $iter823) { 
- $xfer += $iter795->write($output); + $xfer += $iter823->write($output); } } $output->writeListEnd(); @@ -27187,9 +27821,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter796) + foreach ($this->triggers as $iter824) { - $xfer += $iter796->write($output); + $xfer += $iter824->write($output); } } $output->writeListEnd(); @@ -27204,9 +27838,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter797) + foreach ($this->poolTriggers as $iter825) { - $xfer += $iter797->write($output); + $xfer += $iter825->write($output); } } $output->writeListEnd(); @@ -27759,15 +28393,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size798 = 0; - $_etype801 = 0; - $xfer += $input->readListBegin($_etype801, $_size798); - for ($_i802 = 0; $_i802 < $_size798; ++$_i802) + $_size826 = 0; + $_etype829 = 0; + $xfer += $input->readListBegin($_etype829, $_size826); + for ($_i830 = 0; $_i830 < $_size826; ++$_i830) { - $elem803 = null; - $elem803 = new \metastore\WMResourcePlan(); - $xfer += $elem803->read($input); - $this->resourcePlans []= $elem803; + $elem831 = null; + $elem831 = new \metastore\WMResourcePlan(); + $xfer += $elem831->read($input); + $this->resourcePlans []= $elem831; } $xfer += $input->readListEnd(); } else { @@ -27795,9 +28429,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter804) + foreach ($this->resourcePlans as $iter832) { - $xfer += $iter804->write($output); + $xfer += $iter832->write($output); } } $output->writeListEnd(); @@ -28203,14 +28837,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size805 = 0; - $_etype808 = 0; - $xfer += $input->readListBegin($_etype808, $_size805); - for ($_i809 = 0; $_i809 < $_size805; ++$_i809) + $_size833 = 0; + $_etype836 = 0; + $xfer += $input->readListBegin($_etype836, $_size833); + for ($_i837 = 0; $_i837 < $_size833; ++$_i837) { - $elem810 = null; - $xfer += $input->readString($elem810); - $this->errors []= $elem810; + $elem838 = null; + $xfer += $input->readString($elem838); + $this->errors []= $elem838; } $xfer += $input->readListEnd(); } else { @@ -28220,14 +28854,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size811 = 0; - $_etype814 = 0; - $xfer += $input->readListBegin($_etype814, $_size811); - for ($_i815 = 0; $_i815 < $_size811; ++$_i815) + $_size839 = 0; + $_etype842 = 0; + $xfer += $input->readListBegin($_etype842, $_size839); + for ($_i843 = 0; $_i843 < $_size839; ++$_i843) { - $elem816 = null; - $xfer += $input->readString($elem816); - $this->warnings []= $elem816; + $elem844 = null; + $xfer += $input->readString($elem844); + $this->warnings []= $elem844; } $xfer += $input->readListEnd(); } else { @@ -28255,9 +28889,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter817) + foreach ($this->errors as $iter845) { - $xfer += $output->writeString($iter817); + $xfer += $output->writeString($iter845); } } $output->writeListEnd(); @@ -28272,9 +28906,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter818) + 
foreach ($this->warnings as $iter846) { - $xfer += $output->writeString($iter818); + $xfer += $output->writeString($iter846); } } $output->writeListEnd(); @@ -28947,15 +29581,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size819 = 0; - $_etype822 = 0; - $xfer += $input->readListBegin($_etype822, $_size819); - for ($_i823 = 0; $_i823 < $_size819; ++$_i823) + $_size847 = 0; + $_etype850 = 0; + $xfer += $input->readListBegin($_etype850, $_size847); + for ($_i851 = 0; $_i851 < $_size847; ++$_i851) { - $elem824 = null; - $elem824 = new \metastore\WMTrigger(); - $xfer += $elem824->read($input); - $this->triggers []= $elem824; + $elem852 = null; + $elem852 = new \metastore\WMTrigger(); + $xfer += $elem852->read($input); + $this->triggers []= $elem852; } $xfer += $input->readListEnd(); } else { @@ -28983,9 +29617,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter825) + foreach ($this->triggers as $iter853) { - $xfer += $iter825->write($output); + $xfer += $iter853->write($output); } } $output->writeListEnd(); @@ -30569,15 +31203,15 @@ class SchemaVersion { case 4: if ($ftype == TType::LST) { $this->cols = array(); - $_size826 = 0; - $_etype829 = 0; - $xfer += $input->readListBegin($_etype829, $_size826); - for ($_i830 = 0; $_i830 < $_size826; ++$_i830) + $_size854 = 0; + $_etype857 = 0; + $xfer += $input->readListBegin($_etype857, $_size854); + for ($_i858 = 0; $_i858 < $_size854; ++$_i858) { - $elem831 = null; - $elem831 = new \metastore\FieldSchema(); - $xfer += $elem831->read($input); - $this->cols []= $elem831; + $elem859 = null; + $elem859 = new \metastore\FieldSchema(); + $xfer += $elem859->read($input); + $this->cols []= $elem859; } $xfer += $input->readListEnd(); } else { @@ -30666,9 +31300,9 @@ class SchemaVersion { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter832) + foreach ($this->cols as $iter860) { - $xfer += $iter832->write($output); + $xfer += $iter860->write($output); } } $output->writeListEnd(); @@ -30990,15 +31624,15 @@ class FindSchemasByColsResp { case 1: if ($ftype == TType::LST) { $this->schemaVersions = array(); - $_size833 = 0; - $_etype836 = 0; - $xfer += $input->readListBegin($_etype836, $_size833); - for ($_i837 = 0; $_i837 < $_size833; ++$_i837) + $_size861 = 0; + $_etype864 = 0; + $xfer += $input->readListBegin($_etype864, $_size861); + for ($_i865 = 0; $_i865 < $_size861; ++$_i865) { - $elem838 = null; - $elem838 = new \metastore\SchemaVersionDescriptor(); - $xfer += $elem838->read($input); - $this->schemaVersions []= $elem838; + $elem866 = null; + $elem866 = new \metastore\SchemaVersionDescriptor(); + $xfer += $elem866->read($input); + $this->schemaVersions []= $elem866; } $xfer += $input->readListEnd(); } else { @@ -31026,9 +31660,9 @@ class FindSchemasByColsResp { { $output->writeListBegin(TType::STRUCT, count($this->schemaVersions)); { - foreach ($this->schemaVersions as $iter839) + foreach ($this->schemaVersions as $iter867) { - $xfer += $iter839->write($output); + $xfer += $iter867->write($output); } } $output->writeListEnd(); @@ -31681,15 +32315,15 @@ class AlterPartitionsRequest { case 4: if ($ftype == TType::LST) { $this->partitions = array(); - $_size840 = 0; - $_etype843 = 0; - $xfer += $input->readListBegin($_etype843, $_size840); - for ($_i844 = 0; $_i844 < $_size840; ++$_i844) + $_size868 = 0; + $_etype871 = 0; + $xfer += 
$input->readListBegin($_etype871, $_size868); + for ($_i872 = 0; $_i872 < $_size868; ++$_i872) { - $elem845 = null; - $elem845 = new \metastore\Partition(); - $xfer += $elem845->read($input); - $this->partitions []= $elem845; + $elem873 = null; + $elem873 = new \metastore\Partition(); + $xfer += $elem873->read($input); + $this->partitions []= $elem873; } $xfer += $input->readListEnd(); } else { @@ -31754,9 +32388,9 @@ class AlterPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter846) + foreach ($this->partitions as $iter874) { - $xfer += $iter846->write($output); + $xfer += $iter874->write($output); } } $output->writeListEnd(); @@ -31965,14 +32599,14 @@ class RenamePartitionRequest { case 4: if ($ftype == TType::LST) { $this->partVals = array(); - $_size847 = 0; - $_etype850 = 0; - $xfer += $input->readListBegin($_etype850, $_size847); - for ($_i851 = 0; $_i851 < $_size847; ++$_i851) + $_size875 = 0; + $_etype878 = 0; + $xfer += $input->readListBegin($_etype878, $_size875); + for ($_i879 = 0; $_i879 < $_size875; ++$_i879) { - $elem852 = null; - $xfer += $input->readString($elem852); - $this->partVals []= $elem852; + $elem880 = null; + $xfer += $input->readString($elem880); + $this->partVals []= $elem880; } $xfer += $input->readListEnd(); } else { @@ -32030,9 +32664,9 @@ class RenamePartitionRequest { { $output->writeListBegin(TType::STRING, count($this->partVals)); { - foreach ($this->partVals as $iter853) + foreach ($this->partVals as $iter881) { - $xfer += $output->writeString($iter853); + $xfer += $output->writeString($iter881); } } $output->writeListEnd(); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index a595732f04af4304974186178377192227bb80fb..d57de353c64bb4219d6ddbd187bfe2d78b5de449 100755 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -238,6 +238,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' bool heartbeat_lock_materialization_rebuild(string dbName, string tableName, i64 txnId)') print(' void add_runtime_stats(RuntimeStat stat)') print(' get_runtime_stats(GetRuntimeStatsRequest rqst)') + print(' GetPartitionsResponse get_partitions_with_specs(GetPartitionsRequest request)') print(' string getName()') print(' string getVersion()') print(' fb_status getStatus()') @@ -1591,6 +1592,12 @@ elif cmd == 'get_runtime_stats': sys.exit(1) pp.pprint(client.get_runtime_stats(eval(args[0]),)) +elif cmd == 'get_partitions_with_specs': + if len(args) != 1: + print('get_partitions_with_specs requires 1 args') + sys.exit(1) + pp.pprint(client.get_partitions_with_specs(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index d098dba100bdfe56aa6ecafb31880098a2d7c6cb..154d2ce5a3cd57304a94231bb43d69aba53bd517 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1652,6 
+1652,13 @@ def get_runtime_stats(self, rqst): """ pass + def get_partitions_with_specs(self, request): + """ + Parameters: + - request + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -9297,6 +9304,39 @@ def recv_get_runtime_stats(self): raise result.o1 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_runtime_stats failed: unknown result") + def get_partitions_with_specs(self, request): + """ + Parameters: + - request + """ + self.send_get_partitions_with_specs(request) + return self.recv_get_partitions_with_specs() + + def send_get_partitions_with_specs(self, request): + self._oprot.writeMessageBegin('get_partitions_with_specs', TMessageType.CALL, self._seqid) + args = get_partitions_with_specs_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_with_specs(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_partitions_with_specs_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_with_specs failed: unknown result") + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -9515,6 +9555,7 @@ def __init__(self, handler): self._processMap["heartbeat_lock_materialization_rebuild"] = Processor.process_heartbeat_lock_materialization_rebuild self._processMap["add_runtime_stats"] = Processor.process_add_runtime_stats self._processMap["get_runtime_stats"] = Processor.process_get_runtime_stats + self._processMap["get_partitions_with_specs"] = Processor.process_get_partitions_with_specs def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -14818,6 +14859,28 @@ def process_get_runtime_stats(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_partitions_with_specs(self, seqid, iprot, oprot): + args = get_partitions_with_specs_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_with_specs_result() + try: + result.success = self._handler.get_partitions_with_specs(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_partitions_with_specs", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -16468,10 +16531,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype854, _size851) = iprot.readListBegin() - for _i855 in xrange(_size851): - _elem856 = iprot.readString() - self.success.append(_elem856) + (_etype882, _size879) = iprot.readListBegin() + for _i883 in xrange(_size879): + _elem884 = iprot.readString() + self.success.append(_elem884) iprot.readListEnd() else: iprot.skip(ftype) @@ -16494,8 +16557,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRING, len(self.success)) - for iter857 in self.success: - oprot.writeString(iter857) + for iter885 in self.success: + oprot.writeString(iter885) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16600,10 +16663,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype861, _size858) = iprot.readListBegin() - for _i862 in xrange(_size858): - _elem863 = iprot.readString() - self.success.append(_elem863) + (_etype889, _size886) = iprot.readListBegin() + for _i890 in xrange(_size886): + _elem891 = iprot.readString() + self.success.append(_elem891) iprot.readListEnd() else: iprot.skip(ftype) @@ -16626,8 +16689,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter864 in self.success: - oprot.writeString(iter864) + for iter892 in self.success: + oprot.writeString(iter892) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17397,12 +17460,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype866, _vtype867, _size865 ) = iprot.readMapBegin() - for _i869 in xrange(_size865): - _key870 = iprot.readString() - _val871 = Type() - _val871.read(iprot) - self.success[_key870] = _val871 + (_ktype894, _vtype895, _size893 ) = iprot.readMapBegin() + for _i897 in xrange(_size893): + _key898 = iprot.readString() + _val899 = Type() + _val899.read(iprot) + self.success[_key898] = _val899 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17425,9 +17488,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter872,viter873 in self.success.items(): - oprot.writeString(kiter872) - viter873.write(oprot) + for kiter900,viter901 in self.success.items(): + oprot.writeString(kiter900) + viter901.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -17570,11 +17633,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype877, _size874) = iprot.readListBegin() - for _i878 in xrange(_size874): - _elem879 = FieldSchema() - _elem879.read(iprot) - self.success.append(_elem879) + (_etype905, _size902) = iprot.readListBegin() + for _i906 in xrange(_size902): + _elem907 = FieldSchema() + _elem907.read(iprot) + self.success.append(_elem907) iprot.readListEnd() else: iprot.skip(ftype) @@ -17609,8 +17672,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter880 in self.success: - iter880.write(oprot) + for iter908 in self.success: + iter908.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17777,11 +17840,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype884, _size881) = iprot.readListBegin() - for _i885 in xrange(_size881): - _elem886 = FieldSchema() - _elem886.read(iprot) - self.success.append(_elem886) + (_etype912, _size909) = iprot.readListBegin() + for _i913 in xrange(_size909): + _elem914 = FieldSchema() + _elem914.read(iprot) + self.success.append(_elem914) iprot.readListEnd() else: iprot.skip(ftype) @@ -17816,8 +17879,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter887 in 
self.success: - iter887.write(oprot) + for iter915 in self.success: + iter915.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17970,11 +18033,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype891, _size888) = iprot.readListBegin() - for _i892 in xrange(_size888): - _elem893 = FieldSchema() - _elem893.read(iprot) - self.success.append(_elem893) + (_etype919, _size916) = iprot.readListBegin() + for _i920 in xrange(_size916): + _elem921 = FieldSchema() + _elem921.read(iprot) + self.success.append(_elem921) iprot.readListEnd() else: iprot.skip(ftype) @@ -18009,8 +18072,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter894 in self.success: - iter894.write(oprot) + for iter922 in self.success: + iter922.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18177,11 +18240,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype898, _size895) = iprot.readListBegin() - for _i899 in xrange(_size895): - _elem900 = FieldSchema() - _elem900.read(iprot) - self.success.append(_elem900) + (_etype926, _size923) = iprot.readListBegin() + for _i927 in xrange(_size923): + _elem928 = FieldSchema() + _elem928.read(iprot) + self.success.append(_elem928) iprot.readListEnd() else: iprot.skip(ftype) @@ -18216,8 +18279,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter901 in self.success: - iter901.write(oprot) + for iter929 in self.success: + iter929.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18670,66 +18733,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype905, _size902) = iprot.readListBegin() - for _i906 in xrange(_size902): - _elem907 = SQLPrimaryKey() - _elem907.read(iprot) - self.primaryKeys.append(_elem907) + (_etype933, _size930) = iprot.readListBegin() + for _i934 in xrange(_size930): + _elem935 = SQLPrimaryKey() + _elem935.read(iprot) + self.primaryKeys.append(_elem935) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype911, _size908) = iprot.readListBegin() - for _i912 in xrange(_size908): - _elem913 = SQLForeignKey() - _elem913.read(iprot) - self.foreignKeys.append(_elem913) + (_etype939, _size936) = iprot.readListBegin() + for _i940 in xrange(_size936): + _elem941 = SQLForeignKey() + _elem941.read(iprot) + self.foreignKeys.append(_elem941) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype917, _size914) = iprot.readListBegin() - for _i918 in xrange(_size914): - _elem919 = SQLUniqueConstraint() - _elem919.read(iprot) - self.uniqueConstraints.append(_elem919) + (_etype945, _size942) = iprot.readListBegin() + for _i946 in xrange(_size942): + _elem947 = SQLUniqueConstraint() + _elem947.read(iprot) + self.uniqueConstraints.append(_elem947) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype923, _size920) = iprot.readListBegin() - for _i924 in xrange(_size920): - _elem925 = SQLNotNullConstraint() - _elem925.read(iprot) - self.notNullConstraints.append(_elem925) + (_etype951, _size948) = iprot.readListBegin() + for _i952 in xrange(_size948): 
+ _elem953 = SQLNotNullConstraint() + _elem953.read(iprot) + self.notNullConstraints.append(_elem953) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype929, _size926) = iprot.readListBegin() - for _i930 in xrange(_size926): - _elem931 = SQLDefaultConstraint() - _elem931.read(iprot) - self.defaultConstraints.append(_elem931) + (_etype957, _size954) = iprot.readListBegin() + for _i958 in xrange(_size954): + _elem959 = SQLDefaultConstraint() + _elem959.read(iprot) + self.defaultConstraints.append(_elem959) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype935, _size932) = iprot.readListBegin() - for _i936 in xrange(_size932): - _elem937 = SQLCheckConstraint() - _elem937.read(iprot) - self.checkConstraints.append(_elem937) + (_etype963, _size960) = iprot.readListBegin() + for _i964 in xrange(_size960): + _elem965 = SQLCheckConstraint() + _elem965.read(iprot) + self.checkConstraints.append(_elem965) iprot.readListEnd() else: iprot.skip(ftype) @@ -18750,43 +18813,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter938 in self.primaryKeys: - iter938.write(oprot) + for iter966 in self.primaryKeys: + iter966.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter939 in self.foreignKeys: - iter939.write(oprot) + for iter967 in self.foreignKeys: + iter967.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter940 in self.uniqueConstraints: - iter940.write(oprot) + for iter968 in self.uniqueConstraints: + iter968.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter941 in self.notNullConstraints: - iter941.write(oprot) + for iter969 in self.notNullConstraints: + iter969.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter942 in self.defaultConstraints: - iter942.write(oprot) + for iter970 in self.defaultConstraints: + iter970.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter943 in self.checkConstraints: - iter943.write(oprot) + for iter971 in self.checkConstraints: + iter971.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20346,10 +20409,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype947, _size944) = iprot.readListBegin() - for _i948 in xrange(_size944): - _elem949 = iprot.readString() - self.partNames.append(_elem949) + (_etype975, _size972) = iprot.readListBegin() + for _i976 in xrange(_size972): + _elem977 = iprot.readString() + self.partNames.append(_elem977) iprot.readListEnd() else: iprot.skip(ftype) @@ 
-20374,8 +20437,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter950 in self.partNames: - oprot.writeString(iter950) + for iter978 in self.partNames: + oprot.writeString(iter978) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20720,10 +20783,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype954, _size951) = iprot.readListBegin() - for _i955 in xrange(_size951): - _elem956 = iprot.readString() - self.success.append(_elem956) + (_etype982, _size979) = iprot.readListBegin() + for _i983 in xrange(_size979): + _elem984 = iprot.readString() + self.success.append(_elem984) iprot.readListEnd() else: iprot.skip(ftype) @@ -20746,8 +20809,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter957 in self.success: - oprot.writeString(iter957) + for iter985 in self.success: + oprot.writeString(iter985) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20897,10 +20960,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype961, _size958) = iprot.readListBegin() - for _i962 in xrange(_size958): - _elem963 = iprot.readString() - self.success.append(_elem963) + (_etype989, _size986) = iprot.readListBegin() + for _i990 in xrange(_size986): + _elem991 = iprot.readString() + self.success.append(_elem991) iprot.readListEnd() else: iprot.skip(ftype) @@ -20923,8 +20986,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter964 in self.success: - oprot.writeString(iter964) + for iter992 in self.success: + oprot.writeString(iter992) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21048,10 +21111,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype968, _size965) = iprot.readListBegin() - for _i969 in xrange(_size965): - _elem970 = iprot.readString() - self.success.append(_elem970) + (_etype996, _size993) = iprot.readListBegin() + for _i997 in xrange(_size993): + _elem998 = iprot.readString() + self.success.append(_elem998) iprot.readListEnd() else: iprot.skip(ftype) @@ -21074,8 +21137,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter971 in self.success: - oprot.writeString(iter971) + for iter999 in self.success: + oprot.writeString(iter999) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21148,10 +21211,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype975, _size972) = iprot.readListBegin() - for _i976 in xrange(_size972): - _elem977 = iprot.readString() - self.tbl_types.append(_elem977) + (_etype1003, _size1000) = iprot.readListBegin() + for _i1004 in xrange(_size1000): + _elem1005 = iprot.readString() + self.tbl_types.append(_elem1005) iprot.readListEnd() else: iprot.skip(ftype) @@ -21176,8 +21239,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter978 in self.tbl_types: - oprot.writeString(iter978) + for iter1006 in self.tbl_types: + oprot.writeString(iter1006) 
oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21233,11 +21296,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype982, _size979) = iprot.readListBegin() - for _i983 in xrange(_size979): - _elem984 = TableMeta() - _elem984.read(iprot) - self.success.append(_elem984) + (_etype1010, _size1007) = iprot.readListBegin() + for _i1011 in xrange(_size1007): + _elem1012 = TableMeta() + _elem1012.read(iprot) + self.success.append(_elem1012) iprot.readListEnd() else: iprot.skip(ftype) @@ -21260,8 +21323,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter985 in self.success: - iter985.write(oprot) + for iter1013 in self.success: + iter1013.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21385,10 +21448,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype989, _size986) = iprot.readListBegin() - for _i990 in xrange(_size986): - _elem991 = iprot.readString() - self.success.append(_elem991) + (_etype1017, _size1014) = iprot.readListBegin() + for _i1018 in xrange(_size1014): + _elem1019 = iprot.readString() + self.success.append(_elem1019) iprot.readListEnd() else: iprot.skip(ftype) @@ -21411,8 +21474,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter992 in self.success: - oprot.writeString(iter992) + for iter1020 in self.success: + oprot.writeString(iter1020) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21648,10 +21711,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype996, _size993) = iprot.readListBegin() - for _i997 in xrange(_size993): - _elem998 = iprot.readString() - self.tbl_names.append(_elem998) + (_etype1024, _size1021) = iprot.readListBegin() + for _i1025 in xrange(_size1021): + _elem1026 = iprot.readString() + self.tbl_names.append(_elem1026) iprot.readListEnd() else: iprot.skip(ftype) @@ -21672,8 +21735,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter999 in self.tbl_names: - oprot.writeString(iter999) + for iter1027 in self.tbl_names: + oprot.writeString(iter1027) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21725,11 +21788,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1003, _size1000) = iprot.readListBegin() - for _i1004 in xrange(_size1000): - _elem1005 = Table() - _elem1005.read(iprot) - self.success.append(_elem1005) + (_etype1031, _size1028) = iprot.readListBegin() + for _i1032 in xrange(_size1028): + _elem1033 = Table() + _elem1033.read(iprot) + self.success.append(_elem1033) iprot.readListEnd() else: iprot.skip(ftype) @@ -21746,8 +21809,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1006 in self.success: - iter1006.write(oprot) + for iter1034 in self.success: + iter1034.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22615,10 +22678,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1010, _size1007) = iprot.readListBegin() - for _i1011 in 
xrange(_size1007): - _elem1012 = iprot.readString() - self.success.append(_elem1012) + (_etype1038, _size1035) = iprot.readListBegin() + for _i1039 in xrange(_size1035): + _elem1040 = iprot.readString() + self.success.append(_elem1040) iprot.readListEnd() else: iprot.skip(ftype) @@ -22653,8 +22716,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1013 in self.success: - oprot.writeString(iter1013) + for iter1041 in self.success: + oprot.writeString(iter1041) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23783,11 +23846,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1017, _size1014) = iprot.readListBegin() - for _i1018 in xrange(_size1014): - _elem1019 = Partition() - _elem1019.read(iprot) - self.new_parts.append(_elem1019) + (_etype1045, _size1042) = iprot.readListBegin() + for _i1046 in xrange(_size1042): + _elem1047 = Partition() + _elem1047.read(iprot) + self.new_parts.append(_elem1047) iprot.readListEnd() else: iprot.skip(ftype) @@ -23804,8 +23867,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1020 in self.new_parts: - iter1020.write(oprot) + for iter1048 in self.new_parts: + iter1048.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23963,11 +24026,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1024, _size1021) = iprot.readListBegin() - for _i1025 in xrange(_size1021): - _elem1026 = PartitionSpec() - _elem1026.read(iprot) - self.new_parts.append(_elem1026) + (_etype1052, _size1049) = iprot.readListBegin() + for _i1053 in xrange(_size1049): + _elem1054 = PartitionSpec() + _elem1054.read(iprot) + self.new_parts.append(_elem1054) iprot.readListEnd() else: iprot.skip(ftype) @@ -23984,8 +24047,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1027 in self.new_parts: - iter1027.write(oprot) + for iter1055 in self.new_parts: + iter1055.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24159,10 +24222,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1031, _size1028) = iprot.readListBegin() - for _i1032 in xrange(_size1028): - _elem1033 = iprot.readString() - self.part_vals.append(_elem1033) + (_etype1059, _size1056) = iprot.readListBegin() + for _i1060 in xrange(_size1056): + _elem1061 = iprot.readString() + self.part_vals.append(_elem1061) iprot.readListEnd() else: iprot.skip(ftype) @@ -24187,8 +24250,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1034 in self.part_vals: - oprot.writeString(iter1034) + for iter1062 in self.part_vals: + oprot.writeString(iter1062) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24541,10 +24604,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1038, _size1035) = iprot.readListBegin() - for _i1039 in xrange(_size1035): - _elem1040 = iprot.readString() - self.part_vals.append(_elem1040) + (_etype1066, _size1063) = iprot.readListBegin() + for _i1067 in xrange(_size1063): 
+ _elem1068 = iprot.readString() + self.part_vals.append(_elem1068) iprot.readListEnd() else: iprot.skip(ftype) @@ -24575,8 +24638,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1041 in self.part_vals: - oprot.writeString(iter1041) + for iter1069 in self.part_vals: + oprot.writeString(iter1069) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -25171,10 +25234,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1045, _size1042) = iprot.readListBegin() - for _i1046 in xrange(_size1042): - _elem1047 = iprot.readString() - self.part_vals.append(_elem1047) + (_etype1073, _size1070) = iprot.readListBegin() + for _i1074 in xrange(_size1070): + _elem1075 = iprot.readString() + self.part_vals.append(_elem1075) iprot.readListEnd() else: iprot.skip(ftype) @@ -25204,8 +25267,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1048 in self.part_vals: - oprot.writeString(iter1048) + for iter1076 in self.part_vals: + oprot.writeString(iter1076) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -25378,10 +25441,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1052, _size1049) = iprot.readListBegin() - for _i1053 in xrange(_size1049): - _elem1054 = iprot.readString() - self.part_vals.append(_elem1054) + (_etype1080, _size1077) = iprot.readListBegin() + for _i1081 in xrange(_size1077): + _elem1082 = iprot.readString() + self.part_vals.append(_elem1082) iprot.readListEnd() else: iprot.skip(ftype) @@ -25417,8 +25480,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1055 in self.part_vals: - oprot.writeString(iter1055) + for iter1083 in self.part_vals: + oprot.writeString(iter1083) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -26155,10 +26218,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1059, _size1056) = iprot.readListBegin() - for _i1060 in xrange(_size1056): - _elem1061 = iprot.readString() - self.part_vals.append(_elem1061) + (_etype1087, _size1084) = iprot.readListBegin() + for _i1088 in xrange(_size1084): + _elem1089 = iprot.readString() + self.part_vals.append(_elem1089) iprot.readListEnd() else: iprot.skip(ftype) @@ -26183,8 +26246,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1062 in self.part_vals: - oprot.writeString(iter1062) + for iter1090 in self.part_vals: + oprot.writeString(iter1090) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26343,11 +26406,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1064, _vtype1065, _size1063 ) = iprot.readMapBegin() - for _i1067 in xrange(_size1063): - _key1068 = iprot.readString() - _val1069 = iprot.readString() - self.partitionSpecs[_key1068] = _val1069 + (_ktype1092, _vtype1093, _size1091 ) = iprot.readMapBegin() + for _i1095 in xrange(_size1091): + _key1096 = iprot.readString() + _val1097 = iprot.readString() + self.partitionSpecs[_key1096] = 
_val1097 iprot.readMapEnd() else: iprot.skip(ftype) @@ -26384,9 +26447,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1070,viter1071 in self.partitionSpecs.items(): - oprot.writeString(kiter1070) - oprot.writeString(viter1071) + for kiter1098,viter1099 in self.partitionSpecs.items(): + oprot.writeString(kiter1098) + oprot.writeString(viter1099) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -26591,11 +26654,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1073, _vtype1074, _size1072 ) = iprot.readMapBegin() - for _i1076 in xrange(_size1072): - _key1077 = iprot.readString() - _val1078 = iprot.readString() - self.partitionSpecs[_key1077] = _val1078 + (_ktype1101, _vtype1102, _size1100 ) = iprot.readMapBegin() + for _i1104 in xrange(_size1100): + _key1105 = iprot.readString() + _val1106 = iprot.readString() + self.partitionSpecs[_key1105] = _val1106 iprot.readMapEnd() else: iprot.skip(ftype) @@ -26632,9 +26695,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1079,viter1080 in self.partitionSpecs.items(): - oprot.writeString(kiter1079) - oprot.writeString(viter1080) + for kiter1107,viter1108 in self.partitionSpecs.items(): + oprot.writeString(kiter1107) + oprot.writeString(viter1108) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -26717,11 +26780,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1084, _size1081) = iprot.readListBegin() - for _i1085 in xrange(_size1081): - _elem1086 = Partition() - _elem1086.read(iprot) - self.success.append(_elem1086) + (_etype1112, _size1109) = iprot.readListBegin() + for _i1113 in xrange(_size1109): + _elem1114 = Partition() + _elem1114.read(iprot) + self.success.append(_elem1114) iprot.readListEnd() else: iprot.skip(ftype) @@ -26762,8 +26825,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1087 in self.success: - iter1087.write(oprot) + for iter1115 in self.success: + iter1115.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26857,10 +26920,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1091, _size1088) = iprot.readListBegin() - for _i1092 in xrange(_size1088): - _elem1093 = iprot.readString() - self.part_vals.append(_elem1093) + (_etype1119, _size1116) = iprot.readListBegin() + for _i1120 in xrange(_size1116): + _elem1121 = iprot.readString() + self.part_vals.append(_elem1121) iprot.readListEnd() else: iprot.skip(ftype) @@ -26872,10 +26935,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1097, _size1094) = iprot.readListBegin() - for _i1098 in xrange(_size1094): - _elem1099 = iprot.readString() - self.group_names.append(_elem1099) + (_etype1125, _size1122) = iprot.readListBegin() + for _i1126 in xrange(_size1122): + _elem1127 = iprot.readString() + self.group_names.append(_elem1127) iprot.readListEnd() else: iprot.skip(ftype) @@ -26900,8 +26963,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 
3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1100 in self.part_vals: - oprot.writeString(iter1100) + for iter1128 in self.part_vals: + oprot.writeString(iter1128) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -26911,8 +26974,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1101 in self.group_names: - oprot.writeString(iter1101) + for iter1129 in self.group_names: + oprot.writeString(iter1129) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27341,11 +27404,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1105, _size1102) = iprot.readListBegin() - for _i1106 in xrange(_size1102): - _elem1107 = Partition() - _elem1107.read(iprot) - self.success.append(_elem1107) + (_etype1133, _size1130) = iprot.readListBegin() + for _i1134 in xrange(_size1130): + _elem1135 = Partition() + _elem1135.read(iprot) + self.success.append(_elem1135) iprot.readListEnd() else: iprot.skip(ftype) @@ -27374,8 +27437,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1108 in self.success: - iter1108.write(oprot) + for iter1136 in self.success: + iter1136.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27469,10 +27532,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1112, _size1109) = iprot.readListBegin() - for _i1113 in xrange(_size1109): - _elem1114 = iprot.readString() - self.group_names.append(_elem1114) + (_etype1140, _size1137) = iprot.readListBegin() + for _i1141 in xrange(_size1137): + _elem1142 = iprot.readString() + self.group_names.append(_elem1142) iprot.readListEnd() else: iprot.skip(ftype) @@ -27505,8 +27568,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1115 in self.group_names: - oprot.writeString(iter1115) + for iter1143 in self.group_names: + oprot.writeString(iter1143) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27567,11 +27630,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1119, _size1116) = iprot.readListBegin() - for _i1120 in xrange(_size1116): - _elem1121 = Partition() - _elem1121.read(iprot) - self.success.append(_elem1121) + (_etype1147, _size1144) = iprot.readListBegin() + for _i1148 in xrange(_size1144): + _elem1149 = Partition() + _elem1149.read(iprot) + self.success.append(_elem1149) iprot.readListEnd() else: iprot.skip(ftype) @@ -27600,8 +27663,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1122 in self.success: - iter1122.write(oprot) + for iter1150 in self.success: + iter1150.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27759,11 +27822,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1126, _size1123) = iprot.readListBegin() - for _i1127 in xrange(_size1123): - _elem1128 = PartitionSpec() - _elem1128.read(iprot) - self.success.append(_elem1128) + (_etype1154, _size1151) = iprot.readListBegin() + for _i1155 in xrange(_size1151): + 
_elem1156 = PartitionSpec() + _elem1156.read(iprot) + self.success.append(_elem1156) iprot.readListEnd() else: iprot.skip(ftype) @@ -27792,8 +27855,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1129 in self.success: - iter1129.write(oprot) + for iter1157 in self.success: + iter1157.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27951,10 +28014,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1133, _size1130) = iprot.readListBegin() - for _i1134 in xrange(_size1130): - _elem1135 = iprot.readString() - self.success.append(_elem1135) + (_etype1161, _size1158) = iprot.readListBegin() + for _i1162 in xrange(_size1158): + _elem1163 = iprot.readString() + self.success.append(_elem1163) iprot.readListEnd() else: iprot.skip(ftype) @@ -27983,8 +28046,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1136 in self.success: - oprot.writeString(iter1136) + for iter1164 in self.success: + oprot.writeString(iter1164) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28224,10 +28287,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1140, _size1137) = iprot.readListBegin() - for _i1141 in xrange(_size1137): - _elem1142 = iprot.readString() - self.part_vals.append(_elem1142) + (_etype1168, _size1165) = iprot.readListBegin() + for _i1169 in xrange(_size1165): + _elem1170 = iprot.readString() + self.part_vals.append(_elem1170) iprot.readListEnd() else: iprot.skip(ftype) @@ -28257,8 +28320,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1143 in self.part_vals: - oprot.writeString(iter1143) + for iter1171 in self.part_vals: + oprot.writeString(iter1171) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28322,11 +28385,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1147, _size1144) = iprot.readListBegin() - for _i1148 in xrange(_size1144): - _elem1149 = Partition() - _elem1149.read(iprot) - self.success.append(_elem1149) + (_etype1175, _size1172) = iprot.readListBegin() + for _i1176 in xrange(_size1172): + _elem1177 = Partition() + _elem1177.read(iprot) + self.success.append(_elem1177) iprot.readListEnd() else: iprot.skip(ftype) @@ -28355,8 +28418,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1150 in self.success: - iter1150.write(oprot) + for iter1178 in self.success: + iter1178.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28443,10 +28506,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1154, _size1151) = iprot.readListBegin() - for _i1155 in xrange(_size1151): - _elem1156 = iprot.readString() - self.part_vals.append(_elem1156) + (_etype1182, _size1179) = iprot.readListBegin() + for _i1183 in xrange(_size1179): + _elem1184 = iprot.readString() + self.part_vals.append(_elem1184) iprot.readListEnd() else: iprot.skip(ftype) @@ -28463,10 +28526,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: 
self.group_names = [] - (_etype1160, _size1157) = iprot.readListBegin() - for _i1161 in xrange(_size1157): - _elem1162 = iprot.readString() - self.group_names.append(_elem1162) + (_etype1188, _size1185) = iprot.readListBegin() + for _i1189 in xrange(_size1185): + _elem1190 = iprot.readString() + self.group_names.append(_elem1190) iprot.readListEnd() else: iprot.skip(ftype) @@ -28491,8 +28554,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1163 in self.part_vals: - oprot.writeString(iter1163) + for iter1191 in self.part_vals: + oprot.writeString(iter1191) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28506,8 +28569,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1164 in self.group_names: - oprot.writeString(iter1164) + for iter1192 in self.group_names: + oprot.writeString(iter1192) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28569,11 +28632,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1168, _size1165) = iprot.readListBegin() - for _i1169 in xrange(_size1165): - _elem1170 = Partition() - _elem1170.read(iprot) - self.success.append(_elem1170) + (_etype1196, _size1193) = iprot.readListBegin() + for _i1197 in xrange(_size1193): + _elem1198 = Partition() + _elem1198.read(iprot) + self.success.append(_elem1198) iprot.readListEnd() else: iprot.skip(ftype) @@ -28602,8 +28665,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1171 in self.success: - iter1171.write(oprot) + for iter1199 in self.success: + iter1199.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28684,10 +28747,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1175, _size1172) = iprot.readListBegin() - for _i1176 in xrange(_size1172): - _elem1177 = iprot.readString() - self.part_vals.append(_elem1177) + (_etype1203, _size1200) = iprot.readListBegin() + for _i1204 in xrange(_size1200): + _elem1205 = iprot.readString() + self.part_vals.append(_elem1205) iprot.readListEnd() else: iprot.skip(ftype) @@ -28717,8 +28780,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1178 in self.part_vals: - oprot.writeString(iter1178) + for iter1206 in self.part_vals: + oprot.writeString(iter1206) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -28782,10 +28845,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1182, _size1179) = iprot.readListBegin() - for _i1183 in xrange(_size1179): - _elem1184 = iprot.readString() - self.success.append(_elem1184) + (_etype1210, _size1207) = iprot.readListBegin() + for _i1211 in xrange(_size1207): + _elem1212 = iprot.readString() + self.success.append(_elem1212) iprot.readListEnd() else: iprot.skip(ftype) @@ -28814,8 +28877,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1185 in self.success: - oprot.writeString(iter1185) + for iter1213 in 
self.success: + oprot.writeString(iter1213) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28986,11 +29049,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1189, _size1186) = iprot.readListBegin() - for _i1190 in xrange(_size1186): - _elem1191 = Partition() - _elem1191.read(iprot) - self.success.append(_elem1191) + (_etype1217, _size1214) = iprot.readListBegin() + for _i1218 in xrange(_size1214): + _elem1219 = Partition() + _elem1219.read(iprot) + self.success.append(_elem1219) iprot.readListEnd() else: iprot.skip(ftype) @@ -29019,8 +29082,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1192 in self.success: - iter1192.write(oprot) + for iter1220 in self.success: + iter1220.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29191,11 +29254,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1196, _size1193) = iprot.readListBegin() - for _i1197 in xrange(_size1193): - _elem1198 = PartitionSpec() - _elem1198.read(iprot) - self.success.append(_elem1198) + (_etype1224, _size1221) = iprot.readListBegin() + for _i1225 in xrange(_size1221): + _elem1226 = PartitionSpec() + _elem1226.read(iprot) + self.success.append(_elem1226) iprot.readListEnd() else: iprot.skip(ftype) @@ -29224,8 +29287,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1199 in self.success: - iter1199.write(oprot) + for iter1227 in self.success: + iter1227.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29645,10 +29708,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1203, _size1200) = iprot.readListBegin() - for _i1204 in xrange(_size1200): - _elem1205 = iprot.readString() - self.names.append(_elem1205) + (_etype1231, _size1228) = iprot.readListBegin() + for _i1232 in xrange(_size1228): + _elem1233 = iprot.readString() + self.names.append(_elem1233) iprot.readListEnd() else: iprot.skip(ftype) @@ -29673,8 +29736,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1206 in self.names: - oprot.writeString(iter1206) + for iter1234 in self.names: + oprot.writeString(iter1234) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29733,11 +29796,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1210, _size1207) = iprot.readListBegin() - for _i1211 in xrange(_size1207): - _elem1212 = Partition() - _elem1212.read(iprot) - self.success.append(_elem1212) + (_etype1238, _size1235) = iprot.readListBegin() + for _i1239 in xrange(_size1235): + _elem1240 = Partition() + _elem1240.read(iprot) + self.success.append(_elem1240) iprot.readListEnd() else: iprot.skip(ftype) @@ -29766,8 +29829,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1213 in self.success: - iter1213.write(oprot) + for iter1241 in self.success: + iter1241.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30017,11 +30080,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = 
[] - (_etype1217, _size1214) = iprot.readListBegin() - for _i1218 in xrange(_size1214): - _elem1219 = Partition() - _elem1219.read(iprot) - self.new_parts.append(_elem1219) + (_etype1245, _size1242) = iprot.readListBegin() + for _i1246 in xrange(_size1242): + _elem1247 = Partition() + _elem1247.read(iprot) + self.new_parts.append(_elem1247) iprot.readListEnd() else: iprot.skip(ftype) @@ -30046,8 +30109,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1220 in self.new_parts: - iter1220.write(oprot) + for iter1248 in self.new_parts: + iter1248.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30200,11 +30263,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1224, _size1221) = iprot.readListBegin() - for _i1225 in xrange(_size1221): - _elem1226 = Partition() - _elem1226.read(iprot) - self.new_parts.append(_elem1226) + (_etype1252, _size1249) = iprot.readListBegin() + for _i1253 in xrange(_size1249): + _elem1254 = Partition() + _elem1254.read(iprot) + self.new_parts.append(_elem1254) iprot.readListEnd() else: iprot.skip(ftype) @@ -30235,8 +30298,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1227 in self.new_parts: - iter1227.write(oprot) + for iter1255 in self.new_parts: + iter1255.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -30739,10 +30802,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1231, _size1228) = iprot.readListBegin() - for _i1232 in xrange(_size1228): - _elem1233 = iprot.readString() - self.part_vals.append(_elem1233) + (_etype1259, _size1256) = iprot.readListBegin() + for _i1260 in xrange(_size1256): + _elem1261 = iprot.readString() + self.part_vals.append(_elem1261) iprot.readListEnd() else: iprot.skip(ftype) @@ -30773,8 +30836,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1234 in self.part_vals: - oprot.writeString(iter1234) + for iter1262 in self.part_vals: + oprot.writeString(iter1262) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -31075,10 +31138,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1238, _size1235) = iprot.readListBegin() - for _i1239 in xrange(_size1235): - _elem1240 = iprot.readString() - self.part_vals.append(_elem1240) + (_etype1266, _size1263) = iprot.readListBegin() + for _i1267 in xrange(_size1263): + _elem1268 = iprot.readString() + self.part_vals.append(_elem1268) iprot.readListEnd() else: iprot.skip(ftype) @@ -31100,8 +31163,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1241 in self.part_vals: - oprot.writeString(iter1241) + for iter1269 in self.part_vals: + oprot.writeString(iter1269) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -31459,10 +31522,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1245, _size1242) = iprot.readListBegin() - for _i1246 in xrange(_size1242): - _elem1247 = iprot.readString() - 
self.success.append(_elem1247) + (_etype1273, _size1270) = iprot.readListBegin() + for _i1274 in xrange(_size1270): + _elem1275 = iprot.readString() + self.success.append(_elem1275) iprot.readListEnd() else: iprot.skip(ftype) @@ -31485,8 +31548,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1248 in self.success: - oprot.writeString(iter1248) + for iter1276 in self.success: + oprot.writeString(iter1276) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31610,11 +31673,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1250, _vtype1251, _size1249 ) = iprot.readMapBegin() - for _i1253 in xrange(_size1249): - _key1254 = iprot.readString() - _val1255 = iprot.readString() - self.success[_key1254] = _val1255 + (_ktype1278, _vtype1279, _size1277 ) = iprot.readMapBegin() + for _i1281 in xrange(_size1277): + _key1282 = iprot.readString() + _val1283 = iprot.readString() + self.success[_key1282] = _val1283 iprot.readMapEnd() else: iprot.skip(ftype) @@ -31637,9 +31700,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1256,viter1257 in self.success.items(): - oprot.writeString(kiter1256) - oprot.writeString(viter1257) + for kiter1284,viter1285 in self.success.items(): + oprot.writeString(kiter1284) + oprot.writeString(viter1285) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31715,11 +31778,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1259, _vtype1260, _size1258 ) = iprot.readMapBegin() - for _i1262 in xrange(_size1258): - _key1263 = iprot.readString() - _val1264 = iprot.readString() - self.part_vals[_key1263] = _val1264 + (_ktype1287, _vtype1288, _size1286 ) = iprot.readMapBegin() + for _i1290 in xrange(_size1286): + _key1291 = iprot.readString() + _val1292 = iprot.readString() + self.part_vals[_key1291] = _val1292 iprot.readMapEnd() else: iprot.skip(ftype) @@ -31749,9 +31812,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1265,viter1266 in self.part_vals.items(): - oprot.writeString(kiter1265) - oprot.writeString(viter1266) + for kiter1293,viter1294 in self.part_vals.items(): + oprot.writeString(kiter1293) + oprot.writeString(viter1294) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -31965,11 +32028,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1268, _vtype1269, _size1267 ) = iprot.readMapBegin() - for _i1271 in xrange(_size1267): - _key1272 = iprot.readString() - _val1273 = iprot.readString() - self.part_vals[_key1272] = _val1273 + (_ktype1296, _vtype1297, _size1295 ) = iprot.readMapBegin() + for _i1299 in xrange(_size1295): + _key1300 = iprot.readString() + _val1301 = iprot.readString() + self.part_vals[_key1300] = _val1301 iprot.readMapEnd() else: iprot.skip(ftype) @@ -31999,9 +32062,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1274,viter1275 in self.part_vals.items(): - oprot.writeString(kiter1274) - oprot.writeString(viter1275) + for kiter1302,viter1303 
in self.part_vals.items(): + oprot.writeString(kiter1302) + oprot.writeString(viter1303) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -36027,10 +36090,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1279, _size1276) = iprot.readListBegin() - for _i1280 in xrange(_size1276): - _elem1281 = iprot.readString() - self.success.append(_elem1281) + (_etype1307, _size1304) = iprot.readListBegin() + for _i1308 in xrange(_size1304): + _elem1309 = iprot.readString() + self.success.append(_elem1309) iprot.readListEnd() else: iprot.skip(ftype) @@ -36053,8 +36116,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1282 in self.success: - oprot.writeString(iter1282) + for iter1310 in self.success: + oprot.writeString(iter1310) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36742,10 +36805,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1286, _size1283) = iprot.readListBegin() - for _i1287 in xrange(_size1283): - _elem1288 = iprot.readString() - self.success.append(_elem1288) + (_etype1314, _size1311) = iprot.readListBegin() + for _i1315 in xrange(_size1311): + _elem1316 = iprot.readString() + self.success.append(_elem1316) iprot.readListEnd() else: iprot.skip(ftype) @@ -36768,8 +36831,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1289 in self.success: - oprot.writeString(iter1289) + for iter1317 in self.success: + oprot.writeString(iter1317) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37283,11 +37346,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1293, _size1290) = iprot.readListBegin() - for _i1294 in xrange(_size1290): - _elem1295 = Role() - _elem1295.read(iprot) - self.success.append(_elem1295) + (_etype1321, _size1318) = iprot.readListBegin() + for _i1322 in xrange(_size1318): + _elem1323 = Role() + _elem1323.read(iprot) + self.success.append(_elem1323) iprot.readListEnd() else: iprot.skip(ftype) @@ -37310,8 +37373,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1296 in self.success: - iter1296.write(oprot) + for iter1324 in self.success: + iter1324.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37820,10 +37883,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1300, _size1297) = iprot.readListBegin() - for _i1301 in xrange(_size1297): - _elem1302 = iprot.readString() - self.group_names.append(_elem1302) + (_etype1328, _size1325) = iprot.readListBegin() + for _i1329 in xrange(_size1325): + _elem1330 = iprot.readString() + self.group_names.append(_elem1330) iprot.readListEnd() else: iprot.skip(ftype) @@ -37848,8 +37911,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1303 in self.group_names: - oprot.writeString(iter1303) + for iter1331 in self.group_names: + oprot.writeString(iter1331) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38076,11 +38139,11 @@ def read(self, iprot): if fid == 0: if 
ftype == TType.LIST: self.success = [] - (_etype1307, _size1304) = iprot.readListBegin() - for _i1308 in xrange(_size1304): - _elem1309 = HiveObjectPrivilege() - _elem1309.read(iprot) - self.success.append(_elem1309) + (_etype1335, _size1332) = iprot.readListBegin() + for _i1336 in xrange(_size1332): + _elem1337 = HiveObjectPrivilege() + _elem1337.read(iprot) + self.success.append(_elem1337) iprot.readListEnd() else: iprot.skip(ftype) @@ -38103,8 +38166,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1310 in self.success: - iter1310.write(oprot) + for iter1338 in self.success: + iter1338.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38774,10 +38837,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1314, _size1311) = iprot.readListBegin() - for _i1315 in xrange(_size1311): - _elem1316 = iprot.readString() - self.group_names.append(_elem1316) + (_etype1342, _size1339) = iprot.readListBegin() + for _i1343 in xrange(_size1339): + _elem1344 = iprot.readString() + self.group_names.append(_elem1344) iprot.readListEnd() else: iprot.skip(ftype) @@ -38798,8 +38861,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1317 in self.group_names: - oprot.writeString(iter1317) + for iter1345 in self.group_names: + oprot.writeString(iter1345) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38854,10 +38917,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1321, _size1318) = iprot.readListBegin() - for _i1322 in xrange(_size1318): - _elem1323 = iprot.readString() - self.success.append(_elem1323) + (_etype1349, _size1346) = iprot.readListBegin() + for _i1350 in xrange(_size1346): + _elem1351 = iprot.readString() + self.success.append(_elem1351) iprot.readListEnd() else: iprot.skip(ftype) @@ -38880,8 +38943,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1324 in self.success: - oprot.writeString(iter1324) + for iter1352 in self.success: + oprot.writeString(iter1352) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39813,10 +39876,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1328, _size1325) = iprot.readListBegin() - for _i1329 in xrange(_size1325): - _elem1330 = iprot.readString() - self.success.append(_elem1330) + (_etype1356, _size1353) = iprot.readListBegin() + for _i1357 in xrange(_size1353): + _elem1358 = iprot.readString() + self.success.append(_elem1358) iprot.readListEnd() else: iprot.skip(ftype) @@ -39833,8 +39896,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1331 in self.success: - oprot.writeString(iter1331) + for iter1359 in self.success: + oprot.writeString(iter1359) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -40361,10 +40424,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1335, _size1332) = iprot.readListBegin() - for _i1336 in xrange(_size1332): - _elem1337 = iprot.readString() - self.success.append(_elem1337) + 
(_etype1363, _size1360) = iprot.readListBegin() + for _i1364 in xrange(_size1360): + _elem1365 = iprot.readString() + self.success.append(_elem1365) iprot.readListEnd() else: iprot.skip(ftype) @@ -40381,8 +40444,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1338 in self.success: - oprot.writeString(iter1338) + for iter1366 in self.success: + oprot.writeString(iter1366) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -48789,11 +48852,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1342, _size1339) = iprot.readListBegin() - for _i1343 in xrange(_size1339): - _elem1344 = SchemaVersion() - _elem1344.read(iprot) - self.success.append(_elem1344) + (_etype1370, _size1367) = iprot.readListBegin() + for _i1371 in xrange(_size1367): + _elem1372 = SchemaVersion() + _elem1372.read(iprot) + self.success.append(_elem1372) iprot.readListEnd() else: iprot.skip(ftype) @@ -48822,8 +48885,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1345 in self.success: - iter1345.write(oprot) + for iter1373 in self.success: + iter1373.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -50298,11 +50361,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1349, _size1346) = iprot.readListBegin() - for _i1350 in xrange(_size1346): - _elem1351 = RuntimeStat() - _elem1351.read(iprot) - self.success.append(_elem1351) + (_etype1377, _size1374) = iprot.readListBegin() + for _i1378 in xrange(_size1374): + _elem1379 = RuntimeStat() + _elem1379.read(iprot) + self.success.append(_elem1379) iprot.readListEnd() else: iprot.skip(ftype) @@ -50325,8 +50388,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1352 in self.success: - iter1352.write(oprot) + for iter1380 in self.success: + iter1380.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -50356,3 +50419,148 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + +class get_partitions_with_specs_args: + """ + Attributes: + - request + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (GetPartitionsRequest, GetPartitionsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, request=None,): + self.request = request + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = GetPartitionsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
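
The hunks here and immediately below add the generated client/server plumbing for the new get_partitions_with_specs metastore call (the get_partitions_with_specs_args and get_partitions_with_specs_result wrappers). A minimal sketch of driving it from the generated Python bindings might look like the following; the method name follows the Thrift convention implied by the _args/_result classes, while the host, port, transport setup, and the example database, table, and projected field names are illustrative assumptions only (a real metastore deployment would normally also require SASL authentication).

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import (GetPartitionsRequest, GetPartitionsProjectSpec,
                                   GetPartitionsFilterSpec, PartitionFilterMode)

# Assumed connection details for a local, unsecured metastore (illustrative only).
transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()

# Ask the metastore to return only the projected fields of the named partition.
request = GetPartitionsRequest(
    projectionSpec=GetPartitionsProjectSpec(fieldList=['values', 'sd.location']),
    filterSpec=GetPartitionsFilterSpec(dbName='default', tblName='web_logs',
                                       filterMode=PartitionFilterMode.BY_NAMES,
                                       filters=['ds=2018-08-15']))
response = client.get_partitions_with_specs(request)  # a GetPartitionsResponse
transport.close()
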
oprot.writeStructBegin('get_partitions_with_specs_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.request) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_partitions_with_specs_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetPartitionsResponse, GetPartitionsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetPartitionsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_partitions_with_specs_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 7fc1e43de03eac1cfe802439ba38f83988299169..005c9a5bce78c2bd58b337223f49e54d8a7ba49e 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -305,6 +305,23 @@ class SchemaVersionState: "DELETED": 8, } +class PartitionFilterMode: + BY_NAMES = 0 + BY_VALUES = 1 + BY_EXPR = 
2 + + _VALUES_TO_NAMES = { + 0: "BY_NAMES", + 1: "BY_VALUES", + 2: "BY_EXPR", + } + + _NAMES_TO_VALUES = { + "BY_NAMES": 0, + "BY_VALUES": 1, + "BY_EXPR": 2, + } + class FunctionType: JAVA = 1 @@ -5253,6 +5270,418 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class GetPartitionsProjectSpec: + """ + Attributes: + - fieldList + - paramKeyPattern + - excludeParamKeyPattern + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'fieldList', (TType.STRING,None), None, ), # 1 + (2, TType.STRING, 'paramKeyPattern', None, None, ), # 2 + (3, TType.BOOL, 'excludeParamKeyPattern', None, None, ), # 3 + ) + + def __init__(self, fieldList=None, paramKeyPattern=None, excludeParamKeyPattern=None,): + self.fieldList = fieldList + self.paramKeyPattern = paramKeyPattern + self.excludeParamKeyPattern = excludeParamKeyPattern + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.fieldList = [] + (_etype217, _size214) = iprot.readListBegin() + for _i218 in xrange(_size214): + _elem219 = iprot.readString() + self.fieldList.append(_elem219) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.paramKeyPattern = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.excludeParamKeyPattern = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetPartitionsProjectSpec') + if self.fieldList is not None: + oprot.writeFieldBegin('fieldList', TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.fieldList)) + for iter220 in self.fieldList: + oprot.writeString(iter220) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.paramKeyPattern is not None: + oprot.writeFieldBegin('paramKeyPattern', TType.STRING, 2) + oprot.writeString(self.paramKeyPattern) + oprot.writeFieldEnd() + if self.excludeParamKeyPattern is not None: + oprot.writeFieldBegin('excludeParamKeyPattern', TType.BOOL, 3) + oprot.writeBool(self.excludeParamKeyPattern) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.fieldList) + value = (value * 31) ^ hash(self.paramKeyPattern) + value = (value * 31) ^ hash(self.excludeParamKeyPattern) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetPartitionsFilterSpec: + """ + Attributes: + - dbName + - tblName + - withAuth + - user + - groupNames + - filterMode + - filters + """ + + thrift_spec = ( + None, # 
0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + (3, TType.BOOL, 'withAuth', None, None, ), # 3 + (4, TType.STRING, 'user', None, None, ), # 4 + (5, TType.LIST, 'groupNames', (TType.STRING,None), None, ), # 5 + (6, TType.I32, 'filterMode', None, None, ), # 6 + (7, TType.LIST, 'filters', (TType.STRING,None), None, ), # 7 + ) + + def __init__(self, dbName=None, tblName=None, withAuth=None, user=None, groupNames=None, filterMode=None, filters=None,): + self.dbName = dbName + self.tblName = tblName + self.withAuth = withAuth + self.user = user + self.groupNames = groupNames + self.filterMode = filterMode + self.filters = filters + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.withAuth = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.user = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.LIST: + self.groupNames = [] + (_etype224, _size221) = iprot.readListBegin() + for _i225 in xrange(_size221): + _elem226 = iprot.readString() + self.groupNames.append(_elem226) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.filterMode = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.LIST: + self.filters = [] + (_etype230, _size227) = iprot.readListBegin() + for _i231 in xrange(_size227): + _elem232 = iprot.readString() + self.filters.append(_elem232) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetPartitionsFilterSpec') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + if self.withAuth is not None: + oprot.writeFieldBegin('withAuth', TType.BOOL, 3) + oprot.writeBool(self.withAuth) + oprot.writeFieldEnd() + if self.user is not None: + oprot.writeFieldBegin('user', TType.STRING, 4) + oprot.writeString(self.user) + oprot.writeFieldEnd() + if self.groupNames is not None: + oprot.writeFieldBegin('groupNames', TType.LIST, 5) + oprot.writeListBegin(TType.STRING, len(self.groupNames)) + for iter233 in self.groupNames: + oprot.writeString(iter233) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.filterMode is not None: + oprot.writeFieldBegin('filterMode', TType.I32, 6) + oprot.writeI32(self.filterMode) + oprot.writeFieldEnd() + if self.filters is not None: + 
oprot.writeFieldBegin('filters', TType.LIST, 7) + oprot.writeListBegin(TType.STRING, len(self.filters)) + for iter234 in self.filters: + oprot.writeString(iter234) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + value = (value * 31) ^ hash(self.withAuth) + value = (value * 31) ^ hash(self.user) + value = (value * 31) ^ hash(self.groupNames) + value = (value * 31) ^ hash(self.filterMode) + value = (value * 31) ^ hash(self.filters) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetPartitionsResponse: + """ + Attributes: + - partitionSpec + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'partitionSpec', (TType.STRUCT,(PartitionSpec, PartitionSpec.thrift_spec)), None, ), # 1 + ) + + def __init__(self, partitionSpec=None,): + self.partitionSpec = partitionSpec + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.partitionSpec = [] + (_etype238, _size235) = iprot.readListBegin() + for _i239 in xrange(_size235): + _elem240 = PartitionSpec() + _elem240.read(iprot) + self.partitionSpec.append(_elem240) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetPartitionsResponse') + if self.partitionSpec is not None: + oprot.writeFieldBegin('partitionSpec', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.partitionSpec)) + for iter241 in self.partitionSpec: + iter241.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.partitionSpec) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetPartitionsRequest: + """ + Attributes: + - projectionSpec + - filterSpec + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'projectionSpec', (GetPartitionsProjectSpec, GetPartitionsProjectSpec.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'filterSpec', (GetPartitionsFilterSpec, GetPartitionsFilterSpec.thrift_spec), None, ), # 2 + ) + + def __init__(self, projectionSpec=None, 
filterSpec=None,): + self.projectionSpec = projectionSpec + self.filterSpec = filterSpec + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.projectionSpec = GetPartitionsProjectSpec() + self.projectionSpec.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.filterSpec = GetPartitionsFilterSpec() + self.filterSpec.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetPartitionsRequest') + if self.projectionSpec is not None: + oprot.writeFieldBegin('projectionSpec', TType.STRUCT, 1) + self.projectionSpec.write(oprot) + oprot.writeFieldEnd() + if self.filterSpec is not None: + oprot.writeFieldBegin('filterSpec', TType.STRUCT, 2) + self.filterSpec.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.projectionSpec) + value = (value * 31) ^ hash(self.filterSpec) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class PartitionWithoutSD: """ Attributes: @@ -5294,10 +5723,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype217, _size214) = iprot.readListBegin() - for _i218 in xrange(_size214): - _elem219 = iprot.readString() - self.values.append(_elem219) + (_etype245, _size242) = iprot.readListBegin() + for _i246 in xrange(_size242): + _elem247 = iprot.readString() + self.values.append(_elem247) iprot.readListEnd() else: iprot.skip(ftype) @@ -5319,11 +5748,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.MAP: self.parameters = {} - (_ktype221, _vtype222, _size220 ) = iprot.readMapBegin() - for _i224 in xrange(_size220): - _key225 = iprot.readString() - _val226 = iprot.readString() - self.parameters[_key225] = _val226 + (_ktype249, _vtype250, _size248 ) = iprot.readMapBegin() + for _i252 in xrange(_size248): + _key253 = iprot.readString() + _val254 = iprot.readString() + self.parameters[_key253] = _val254 iprot.readMapEnd() else: iprot.skip(ftype) @@ -5346,8 +5775,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.values)) - for iter227 in self.values: - oprot.writeString(iter227) + for iter255 in self.values: + oprot.writeString(iter255) oprot.writeListEnd() oprot.writeFieldEnd() if self.createTime is not None: @@ -5365,9 +5794,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', 
TType.MAP, 5) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter228,viter229 in self.parameters.items(): - oprot.writeString(kiter228) - oprot.writeString(viter229) + for kiter256,viter257 in self.parameters.items(): + oprot.writeString(kiter256) + oprot.writeString(viter257) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -5431,11 +5860,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype233, _size230) = iprot.readListBegin() - for _i234 in xrange(_size230): - _elem235 = PartitionWithoutSD() - _elem235.read(iprot) - self.partitions.append(_elem235) + (_etype261, _size258) = iprot.readListBegin() + for _i262 in xrange(_size258): + _elem263 = PartitionWithoutSD() + _elem263.read(iprot) + self.partitions.append(_elem263) iprot.readListEnd() else: iprot.skip(ftype) @@ -5458,8 +5887,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter236 in self.partitions: - iter236.write(oprot) + for iter264 in self.partitions: + iter264.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.sd is not None: @@ -5516,11 +5945,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype240, _size237) = iprot.readListBegin() - for _i241 in xrange(_size237): - _elem242 = Partition() - _elem242.read(iprot) - self.partitions.append(_elem242) + (_etype268, _size265) = iprot.readListBegin() + for _i269 in xrange(_size265): + _elem270 = Partition() + _elem270.read(iprot) + self.partitions.append(_elem270) iprot.readListEnd() else: iprot.skip(ftype) @@ -5537,8 +5966,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter243 in self.partitions: - iter243.write(oprot) + for iter271 in self.partitions: + iter271.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7127,11 +7556,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.statsObj = [] - (_etype247, _size244) = iprot.readListBegin() - for _i248 in xrange(_size244): - _elem249 = ColumnStatisticsObj() - _elem249.read(iprot) - self.statsObj.append(_elem249) + (_etype275, _size272) = iprot.readListBegin() + for _i276 in xrange(_size272): + _elem277 = ColumnStatisticsObj() + _elem277.read(iprot) + self.statsObj.append(_elem277) iprot.readListEnd() else: iprot.skip(ftype) @@ -7157,8 +7586,8 @@ def write(self, oprot): if self.statsObj is not None: oprot.writeFieldBegin('statsObj', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.statsObj)) - for iter250 in self.statsObj: - iter250.write(oprot) + for iter278 in self.statsObj: + iter278.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.isStatsCompliant is not None: @@ -7226,11 +7655,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.colStats = [] - (_etype254, _size251) = iprot.readListBegin() - for _i255 in xrange(_size251): - _elem256 = ColumnStatisticsObj() - _elem256.read(iprot) - self.colStats.append(_elem256) + (_etype282, _size279) = iprot.readListBegin() + for _i283 in xrange(_size279): + _elem284 = ColumnStatisticsObj() + _elem284.read(iprot) + self.colStats.append(_elem284) iprot.readListEnd() else: iprot.skip(ftype) @@ -7257,8 +7686,8 @@ def write(self, oprot): if self.colStats is not None: oprot.writeFieldBegin('colStats', 
TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.colStats)) - for iter257 in self.colStats: - iter257.write(oprot) + for iter285 in self.colStats: + iter285.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.partsFound is not None: @@ -7333,11 +7762,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.colStats = [] - (_etype261, _size258) = iprot.readListBegin() - for _i262 in xrange(_size258): - _elem263 = ColumnStatistics() - _elem263.read(iprot) - self.colStats.append(_elem263) + (_etype289, _size286) = iprot.readListBegin() + for _i290 in xrange(_size286): + _elem291 = ColumnStatistics() + _elem291.read(iprot) + self.colStats.append(_elem291) iprot.readListEnd() else: iprot.skip(ftype) @@ -7369,8 +7798,8 @@ def write(self, oprot): if self.colStats is not None: oprot.writeFieldBegin('colStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.colStats)) - for iter264 in self.colStats: - iter264.write(oprot) + for iter292 in self.colStats: + iter292.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.needMerge is not None: @@ -7509,22 +7938,22 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldSchemas = [] - (_etype268, _size265) = iprot.readListBegin() - for _i269 in xrange(_size265): - _elem270 = FieldSchema() - _elem270.read(iprot) - self.fieldSchemas.append(_elem270) + (_etype296, _size293) = iprot.readListBegin() + for _i297 in xrange(_size293): + _elem298 = FieldSchema() + _elem298.read(iprot) + self.fieldSchemas.append(_elem298) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.properties = {} - (_ktype272, _vtype273, _size271 ) = iprot.readMapBegin() - for _i275 in xrange(_size271): - _key276 = iprot.readString() - _val277 = iprot.readString() - self.properties[_key276] = _val277 + (_ktype300, _vtype301, _size299 ) = iprot.readMapBegin() + for _i303 in xrange(_size299): + _key304 = iprot.readString() + _val305 = iprot.readString() + self.properties[_key304] = _val305 iprot.readMapEnd() else: iprot.skip(ftype) @@ -7541,16 +7970,16 @@ def write(self, oprot): if self.fieldSchemas is not None: oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) - for iter278 in self.fieldSchemas: - iter278.write(oprot) + for iter306 in self.fieldSchemas: + iter306.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter279,viter280 in self.properties.items(): - oprot.writeString(kiter279) - oprot.writeString(viter280) + for kiter307,viter308 in self.properties.items(): + oprot.writeString(kiter307) + oprot.writeString(viter308) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7603,11 +8032,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.properties = {} - (_ktype282, _vtype283, _size281 ) = iprot.readMapBegin() - for _i285 in xrange(_size281): - _key286 = iprot.readString() - _val287 = iprot.readString() - self.properties[_key286] = _val287 + (_ktype310, _vtype311, _size309 ) = iprot.readMapBegin() + for _i313 in xrange(_size309): + _key314 = iprot.readString() + _val315 = iprot.readString() + self.properties[_key314] = _val315 iprot.readMapEnd() else: iprot.skip(ftype) @@ -7624,9 +8053,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 1) 
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter288,viter289 in self.properties.items(): - oprot.writeString(kiter288) - oprot.writeString(viter289) + for kiter316,viter317 in self.properties.items(): + oprot.writeString(kiter316) + oprot.writeString(viter317) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7773,11 +8202,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeys = [] - (_etype293, _size290) = iprot.readListBegin() - for _i294 in xrange(_size290): - _elem295 = SQLPrimaryKey() - _elem295.read(iprot) - self.primaryKeys.append(_elem295) + (_etype321, _size318) = iprot.readListBegin() + for _i322 in xrange(_size318): + _elem323 = SQLPrimaryKey() + _elem323.read(iprot) + self.primaryKeys.append(_elem323) iprot.readListEnd() else: iprot.skip(ftype) @@ -7794,8 +8223,8 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter296 in self.primaryKeys: - iter296.write(oprot) + for iter324 in self.primaryKeys: + iter324.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7966,11 +8395,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeys = [] - (_etype300, _size297) = iprot.readListBegin() - for _i301 in xrange(_size297): - _elem302 = SQLForeignKey() - _elem302.read(iprot) - self.foreignKeys.append(_elem302) + (_etype328, _size325) = iprot.readListBegin() + for _i329 in xrange(_size325): + _elem330 = SQLForeignKey() + _elem330.read(iprot) + self.foreignKeys.append(_elem330) iprot.readListEnd() else: iprot.skip(ftype) @@ -7987,8 +8416,8 @@ def write(self, oprot): if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter303 in self.foreignKeys: - iter303.write(oprot) + for iter331 in self.foreignKeys: + iter331.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8139,11 +8568,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype307, _size304) = iprot.readListBegin() - for _i308 in xrange(_size304): - _elem309 = SQLUniqueConstraint() - _elem309.read(iprot) - self.uniqueConstraints.append(_elem309) + (_etype335, _size332) = iprot.readListBegin() + for _i336 in xrange(_size332): + _elem337 = SQLUniqueConstraint() + _elem337.read(iprot) + self.uniqueConstraints.append(_elem337) iprot.readListEnd() else: iprot.skip(ftype) @@ -8160,8 +8589,8 @@ def write(self, oprot): if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter310 in self.uniqueConstraints: - iter310.write(oprot) + for iter338 in self.uniqueConstraints: + iter338.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8312,11 +8741,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype314, _size311) = iprot.readListBegin() - for _i315 in xrange(_size311): - _elem316 = SQLNotNullConstraint() - _elem316.read(iprot) - self.notNullConstraints.append(_elem316) + (_etype342, _size339) = iprot.readListBegin() + for _i343 in xrange(_size339): + _elem344 = SQLNotNullConstraint() + _elem344.read(iprot) + self.notNullConstraints.append(_elem344) iprot.readListEnd() else: iprot.skip(ftype) @@ -8333,8 +8762,8 @@ def 
write(self, oprot): if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter317 in self.notNullConstraints: - iter317.write(oprot) + for iter345 in self.notNullConstraints: + iter345.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8485,11 +8914,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype321, _size318) = iprot.readListBegin() - for _i322 in xrange(_size318): - _elem323 = SQLDefaultConstraint() - _elem323.read(iprot) - self.defaultConstraints.append(_elem323) + (_etype349, _size346) = iprot.readListBegin() + for _i350 in xrange(_size346): + _elem351 = SQLDefaultConstraint() + _elem351.read(iprot) + self.defaultConstraints.append(_elem351) iprot.readListEnd() else: iprot.skip(ftype) @@ -8506,8 +8935,8 @@ def write(self, oprot): if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter324 in self.defaultConstraints: - iter324.write(oprot) + for iter352 in self.defaultConstraints: + iter352.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8658,11 +9087,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.checkConstraints = [] - (_etype328, _size325) = iprot.readListBegin() - for _i329 in xrange(_size325): - _elem330 = SQLCheckConstraint() - _elem330.read(iprot) - self.checkConstraints.append(_elem330) + (_etype356, _size353) = iprot.readListBegin() + for _i357 in xrange(_size353): + _elem358 = SQLCheckConstraint() + _elem358.read(iprot) + self.checkConstraints.append(_elem358) iprot.readListEnd() else: iprot.skip(ftype) @@ -8679,8 +9108,8 @@ def write(self, oprot): if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter331 in self.checkConstraints: - iter331.write(oprot) + for iter359 in self.checkConstraints: + iter359.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8844,11 +9273,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeyCols = [] - (_etype335, _size332) = iprot.readListBegin() - for _i336 in xrange(_size332): - _elem337 = SQLPrimaryKey() - _elem337.read(iprot) - self.primaryKeyCols.append(_elem337) + (_etype363, _size360) = iprot.readListBegin() + for _i364 in xrange(_size360): + _elem365 = SQLPrimaryKey() + _elem365.read(iprot) + self.primaryKeyCols.append(_elem365) iprot.readListEnd() else: iprot.skip(ftype) @@ -8865,8 +9294,8 @@ def write(self, oprot): if self.primaryKeyCols is not None: oprot.writeFieldBegin('primaryKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeyCols)) - for iter338 in self.primaryKeyCols: - iter338.write(oprot) + for iter366 in self.primaryKeyCols: + iter366.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8920,11 +9349,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeyCols = [] - (_etype342, _size339) = iprot.readListBegin() - for _i343 in xrange(_size339): - _elem344 = SQLForeignKey() - _elem344.read(iprot) - self.foreignKeyCols.append(_elem344) + (_etype370, _size367) = iprot.readListBegin() + for _i371 in xrange(_size367): + _elem372 = SQLForeignKey() + _elem372.read(iprot) + self.foreignKeyCols.append(_elem372) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -8941,8 +9370,8 @@ def write(self, oprot): if self.foreignKeyCols is not None: oprot.writeFieldBegin('foreignKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeyCols)) - for iter345 in self.foreignKeyCols: - iter345.write(oprot) + for iter373 in self.foreignKeyCols: + iter373.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8996,11 +9425,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.uniqueConstraintCols = [] - (_etype349, _size346) = iprot.readListBegin() - for _i350 in xrange(_size346): - _elem351 = SQLUniqueConstraint() - _elem351.read(iprot) - self.uniqueConstraintCols.append(_elem351) + (_etype377, _size374) = iprot.readListBegin() + for _i378 in xrange(_size374): + _elem379 = SQLUniqueConstraint() + _elem379.read(iprot) + self.uniqueConstraintCols.append(_elem379) iprot.readListEnd() else: iprot.skip(ftype) @@ -9017,8 +9446,8 @@ def write(self, oprot): if self.uniqueConstraintCols is not None: oprot.writeFieldBegin('uniqueConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraintCols)) - for iter352 in self.uniqueConstraintCols: - iter352.write(oprot) + for iter380 in self.uniqueConstraintCols: + iter380.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9072,11 +9501,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.notNullConstraintCols = [] - (_etype356, _size353) = iprot.readListBegin() - for _i357 in xrange(_size353): - _elem358 = SQLNotNullConstraint() - _elem358.read(iprot) - self.notNullConstraintCols.append(_elem358) + (_etype384, _size381) = iprot.readListBegin() + for _i385 in xrange(_size381): + _elem386 = SQLNotNullConstraint() + _elem386.read(iprot) + self.notNullConstraintCols.append(_elem386) iprot.readListEnd() else: iprot.skip(ftype) @@ -9093,8 +9522,8 @@ def write(self, oprot): if self.notNullConstraintCols is not None: oprot.writeFieldBegin('notNullConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraintCols)) - for iter359 in self.notNullConstraintCols: - iter359.write(oprot) + for iter387 in self.notNullConstraintCols: + iter387.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9148,11 +9577,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.defaultConstraintCols = [] - (_etype363, _size360) = iprot.readListBegin() - for _i364 in xrange(_size360): - _elem365 = SQLDefaultConstraint() - _elem365.read(iprot) - self.defaultConstraintCols.append(_elem365) + (_etype391, _size388) = iprot.readListBegin() + for _i392 in xrange(_size388): + _elem393 = SQLDefaultConstraint() + _elem393.read(iprot) + self.defaultConstraintCols.append(_elem393) iprot.readListEnd() else: iprot.skip(ftype) @@ -9169,8 +9598,8 @@ def write(self, oprot): if self.defaultConstraintCols is not None: oprot.writeFieldBegin('defaultConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraintCols)) - for iter366 in self.defaultConstraintCols: - iter366.write(oprot) + for iter394 in self.defaultConstraintCols: + iter394.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9224,11 +9653,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.checkConstraintCols = [] - (_etype370, _size367) = iprot.readListBegin() - for _i371 in xrange(_size367): - _elem372 = SQLCheckConstraint() - _elem372.read(iprot) - 
self.checkConstraintCols.append(_elem372) + (_etype398, _size395) = iprot.readListBegin() + for _i399 in xrange(_size395): + _elem400 = SQLCheckConstraint() + _elem400.read(iprot) + self.checkConstraintCols.append(_elem400) iprot.readListEnd() else: iprot.skip(ftype) @@ -9245,8 +9674,8 @@ def write(self, oprot): if self.checkConstraintCols is not None: oprot.writeFieldBegin('checkConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraintCols)) - for iter373 in self.checkConstraintCols: - iter373.write(oprot) + for iter401 in self.checkConstraintCols: + iter401.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9303,11 +9732,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype377, _size374) = iprot.readListBegin() - for _i378 in xrange(_size374): - _elem379 = Partition() - _elem379.read(iprot) - self.partitions.append(_elem379) + (_etype405, _size402) = iprot.readListBegin() + for _i406 in xrange(_size402): + _elem407 = Partition() + _elem407.read(iprot) + self.partitions.append(_elem407) iprot.readListEnd() else: iprot.skip(ftype) @@ -9329,8 +9758,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter380 in self.partitions: - iter380.write(oprot) + for iter408 in self.partitions: + iter408.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.hasUnknownPartitions is not None: @@ -9530,11 +9959,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tableStats = [] - (_etype384, _size381) = iprot.readListBegin() - for _i385 in xrange(_size381): - _elem386 = ColumnStatisticsObj() - _elem386.read(iprot) - self.tableStats.append(_elem386) + (_etype412, _size409) = iprot.readListBegin() + for _i413 in xrange(_size409): + _elem414 = ColumnStatisticsObj() + _elem414.read(iprot) + self.tableStats.append(_elem414) iprot.readListEnd() else: iprot.skip(ftype) @@ -9556,8 +9985,8 @@ def write(self, oprot): if self.tableStats is not None: oprot.writeFieldBegin('tableStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) - for iter387 in self.tableStats: - iter387.write(oprot) + for iter415 in self.tableStats: + iter415.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.isStatsCompliant is not None: @@ -9619,17 +10048,17 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partStats = {} - (_ktype389, _vtype390, _size388 ) = iprot.readMapBegin() - for _i392 in xrange(_size388): - _key393 = iprot.readString() - _val394 = [] - (_etype398, _size395) = iprot.readListBegin() - for _i399 in xrange(_size395): - _elem400 = ColumnStatisticsObj() - _elem400.read(iprot) - _val394.append(_elem400) + (_ktype417, _vtype418, _size416 ) = iprot.readMapBegin() + for _i420 in xrange(_size416): + _key421 = iprot.readString() + _val422 = [] + (_etype426, _size423) = iprot.readListBegin() + for _i427 in xrange(_size423): + _elem428 = ColumnStatisticsObj() + _elem428.read(iprot) + _val422.append(_elem428) iprot.readListEnd() - self.partStats[_key393] = _val394 + self.partStats[_key421] = _val422 iprot.readMapEnd() else: iprot.skip(ftype) @@ -9651,11 +10080,11 @@ def write(self, oprot): if self.partStats is not None: oprot.writeFieldBegin('partStats', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) - for kiter401,viter402 in self.partStats.items(): - oprot.writeString(kiter401) - 
oprot.writeListBegin(TType.STRUCT, len(viter402)) - for iter403 in viter402: - iter403.write(oprot) + for kiter429,viter430 in self.partStats.items(): + oprot.writeString(kiter429) + oprot.writeListBegin(TType.STRUCT, len(viter430)) + for iter431 in viter430: + iter431.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() @@ -9737,10 +10166,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype407, _size404) = iprot.readListBegin() - for _i408 in xrange(_size404): - _elem409 = iprot.readString() - self.colNames.append(_elem409) + (_etype435, _size432) = iprot.readListBegin() + for _i436 in xrange(_size432): + _elem437 = iprot.readString() + self.colNames.append(_elem437) iprot.readListEnd() else: iprot.skip(ftype) @@ -9775,8 +10204,8 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter410 in self.colNames: - oprot.writeString(iter410) + for iter438 in self.colNames: + oprot.writeString(iter438) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -9871,20 +10300,20 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype414, _size411) = iprot.readListBegin() - for _i415 in xrange(_size411): - _elem416 = iprot.readString() - self.colNames.append(_elem416) + (_etype442, _size439) = iprot.readListBegin() + for _i443 in xrange(_size439): + _elem444 = iprot.readString() + self.colNames.append(_elem444) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.partNames = [] - (_etype420, _size417) = iprot.readListBegin() - for _i421 in xrange(_size417): - _elem422 = iprot.readString() - self.partNames.append(_elem422) + (_etype448, _size445) = iprot.readListBegin() + for _i449 in xrange(_size445): + _elem450 = iprot.readString() + self.partNames.append(_elem450) iprot.readListEnd() else: iprot.skip(ftype) @@ -9919,15 +10348,15 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter423 in self.colNames: - oprot.writeString(iter423) + for iter451 in self.colNames: + oprot.writeString(iter451) oprot.writeListEnd() oprot.writeFieldEnd() if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter424 in self.partNames: - oprot.writeString(iter424) + for iter452 in self.partNames: + oprot.writeString(iter452) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -10003,11 +10432,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype428, _size425) = iprot.readListBegin() - for _i429 in xrange(_size425): - _elem430 = Partition() - _elem430.read(iprot) - self.partitions.append(_elem430) + (_etype456, _size453) = iprot.readListBegin() + for _i457 in xrange(_size453): + _elem458 = Partition() + _elem458.read(iprot) + self.partitions.append(_elem458) iprot.readListEnd() else: iprot.skip(ftype) @@ -10029,8 +10458,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter431 in self.partitions: - iter431.write(oprot) + for iter459 in self.partitions: + iter459.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.isStatsCompliant is not None: @@ 
-10115,11 +10544,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.parts = [] - (_etype435, _size432) = iprot.readListBegin() - for _i436 in xrange(_size432): - _elem437 = Partition() - _elem437.read(iprot) - self.parts.append(_elem437) + (_etype463, _size460) = iprot.readListBegin() + for _i464 in xrange(_size460): + _elem465 = Partition() + _elem465.read(iprot) + self.parts.append(_elem465) iprot.readListEnd() else: iprot.skip(ftype) @@ -10164,8 +10593,8 @@ def write(self, oprot): if self.parts is not None: oprot.writeFieldBegin('parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.parts)) - for iter438 in self.parts: - iter438.write(oprot) + for iter466 in self.parts: + iter466.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ifNotExists is not None: @@ -10247,11 +10676,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype442, _size439) = iprot.readListBegin() - for _i443 in xrange(_size439): - _elem444 = Partition() - _elem444.read(iprot) - self.partitions.append(_elem444) + (_etype470, _size467) = iprot.readListBegin() + for _i471 in xrange(_size467): + _elem472 = Partition() + _elem472.read(iprot) + self.partitions.append(_elem472) iprot.readListEnd() else: iprot.skip(ftype) @@ -10268,8 +10697,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter445 in self.partitions: - iter445.write(oprot) + for iter473 in self.partitions: + iter473.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10404,21 +10833,21 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.names = [] - (_etype449, _size446) = iprot.readListBegin() - for _i450 in xrange(_size446): - _elem451 = iprot.readString() - self.names.append(_elem451) + (_etype477, _size474) = iprot.readListBegin() + for _i478 in xrange(_size474): + _elem479 = iprot.readString() + self.names.append(_elem479) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.exprs = [] - (_etype455, _size452) = iprot.readListBegin() - for _i456 in xrange(_size452): - _elem457 = DropPartitionsExpr() - _elem457.read(iprot) - self.exprs.append(_elem457) + (_etype483, _size480) = iprot.readListBegin() + for _i484 in xrange(_size480): + _elem485 = DropPartitionsExpr() + _elem485.read(iprot) + self.exprs.append(_elem485) iprot.readListEnd() else: iprot.skip(ftype) @@ -10435,15 +10864,15 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter458 in self.names: - oprot.writeString(iter458) + for iter486 in self.names: + oprot.writeString(iter486) oprot.writeListEnd() oprot.writeFieldEnd() if self.exprs is not None: oprot.writeFieldBegin('exprs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.exprs)) - for iter459 in self.exprs: - iter459.write(oprot) + for iter487 in self.exprs: + iter487.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10707,11 +11136,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partitionKeys = [] - (_etype463, _size460) = iprot.readListBegin() - for _i464 in xrange(_size460): - _elem465 = FieldSchema() - _elem465.read(iprot) - self.partitionKeys.append(_elem465) + (_etype491, _size488) = iprot.readListBegin() + for _i492 in xrange(_size488): + _elem493 = FieldSchema() + 
_elem493.read(iprot) + self.partitionKeys.append(_elem493) iprot.readListEnd() else: iprot.skip(ftype) @@ -10728,11 +11157,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionOrder = [] - (_etype469, _size466) = iprot.readListBegin() - for _i470 in xrange(_size466): - _elem471 = FieldSchema() - _elem471.read(iprot) - self.partitionOrder.append(_elem471) + (_etype497, _size494) = iprot.readListBegin() + for _i498 in xrange(_size494): + _elem499 = FieldSchema() + _elem499.read(iprot) + self.partitionOrder.append(_elem499) iprot.readListEnd() else: iprot.skip(ftype) @@ -10772,8 +11201,8 @@ def write(self, oprot): if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter472 in self.partitionKeys: - iter472.write(oprot) + for iter500 in self.partitionKeys: + iter500.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.applyDistinct is not None: @@ -10787,8 +11216,8 @@ def write(self, oprot): if self.partitionOrder is not None: oprot.writeFieldBegin('partitionOrder', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.partitionOrder)) - for iter473 in self.partitionOrder: - iter473.write(oprot) + for iter501 in self.partitionOrder: + iter501.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ascending is not None: @@ -10866,10 +11295,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.row = [] - (_etype477, _size474) = iprot.readListBegin() - for _i478 in xrange(_size474): - _elem479 = iprot.readString() - self.row.append(_elem479) + (_etype505, _size502) = iprot.readListBegin() + for _i506 in xrange(_size502): + _elem507 = iprot.readString() + self.row.append(_elem507) iprot.readListEnd() else: iprot.skip(ftype) @@ -10886,8 +11315,8 @@ def write(self, oprot): if self.row is not None: oprot.writeFieldBegin('row', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.row)) - for iter480 in self.row: - oprot.writeString(iter480) + for iter508 in self.row: + oprot.writeString(iter508) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10941,11 +11370,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionValues = [] - (_etype484, _size481) = iprot.readListBegin() - for _i485 in xrange(_size481): - _elem486 = PartitionValuesRow() - _elem486.read(iprot) - self.partitionValues.append(_elem486) + (_etype512, _size509) = iprot.readListBegin() + for _i513 in xrange(_size509): + _elem514 = PartitionValuesRow() + _elem514.read(iprot) + self.partitionValues.append(_elem514) iprot.readListEnd() else: iprot.skip(ftype) @@ -10962,8 +11391,8 @@ def write(self, oprot): if self.partitionValues is not None: oprot.writeFieldBegin('partitionValues', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionValues)) - for iter487 in self.partitionValues: - iter487.write(oprot) + for iter515 in self.partitionValues: + iter515.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11154,11 +11583,11 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.resourceUris = [] - (_etype491, _size488) = iprot.readListBegin() - for _i492 in xrange(_size488): - _elem493 = ResourceUri() - _elem493.read(iprot) - self.resourceUris.append(_elem493) + (_etype519, _size516) = iprot.readListBegin() + for _i520 in xrange(_size516): + _elem521 = ResourceUri() + _elem521.read(iprot) + self.resourceUris.append(_elem521) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -11208,8 +11637,8 @@ def write(self, oprot): if self.resourceUris is not None: oprot.writeFieldBegin('resourceUris', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) - for iter494 in self.resourceUris: - iter494.write(oprot) + for iter522 in self.resourceUris: + iter522.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -11458,11 +11887,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype498, _size495) = iprot.readListBegin() - for _i499 in xrange(_size495): - _elem500 = TxnInfo() - _elem500.read(iprot) - self.open_txns.append(_elem500) + (_etype526, _size523) = iprot.readListBegin() + for _i527 in xrange(_size523): + _elem528 = TxnInfo() + _elem528.read(iprot) + self.open_txns.append(_elem528) iprot.readListEnd() else: iprot.skip(ftype) @@ -11483,8 +11912,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) - for iter501 in self.open_txns: - iter501.write(oprot) + for iter529 in self.open_txns: + iter529.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11555,10 +11984,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype505, _size502) = iprot.readListBegin() - for _i506 in xrange(_size502): - _elem507 = iprot.readI64() - self.open_txns.append(_elem507) + (_etype533, _size530) = iprot.readListBegin() + for _i534 in xrange(_size530): + _elem535 = iprot.readI64() + self.open_txns.append(_elem535) iprot.readListEnd() else: iprot.skip(ftype) @@ -11589,8 +12018,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.I64, len(self.open_txns)) - for iter508 in self.open_txns: - oprot.writeI64(iter508) + for iter536 in self.open_txns: + oprot.writeI64(iter536) oprot.writeListEnd() oprot.writeFieldEnd() if self.min_open_txn is not None: @@ -11699,10 +12128,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.replSrcTxnIds = [] - (_etype512, _size509) = iprot.readListBegin() - for _i513 in xrange(_size509): - _elem514 = iprot.readI64() - self.replSrcTxnIds.append(_elem514) + (_etype540, _size537) = iprot.readListBegin() + for _i541 in xrange(_size537): + _elem542 = iprot.readI64() + self.replSrcTxnIds.append(_elem542) iprot.readListEnd() else: iprot.skip(ftype) @@ -11739,8 +12168,8 @@ def write(self, oprot): if self.replSrcTxnIds is not None: oprot.writeFieldBegin('replSrcTxnIds', TType.LIST, 6) oprot.writeListBegin(TType.I64, len(self.replSrcTxnIds)) - for iter515 in self.replSrcTxnIds: - oprot.writeI64(iter515) + for iter543 in self.replSrcTxnIds: + oprot.writeI64(iter543) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11803,10 +12232,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype519, _size516) = iprot.readListBegin() - for _i520 in xrange(_size516): - _elem521 = iprot.readI64() - self.txn_ids.append(_elem521) + (_etype547, _size544) = iprot.readListBegin() + for _i548 in xrange(_size544): + _elem549 = iprot.readI64() + self.txn_ids.append(_elem549) iprot.readListEnd() else: iprot.skip(ftype) @@ -11823,8 +12252,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter522 in self.txn_ids: - 
oprot.writeI64(iter522) + for iter550 in self.txn_ids: + oprot.writeI64(iter550) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11958,10 +12387,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype526, _size523) = iprot.readListBegin() - for _i527 in xrange(_size523): - _elem528 = iprot.readI64() - self.txn_ids.append(_elem528) + (_etype554, _size551) = iprot.readListBegin() + for _i555 in xrange(_size551): + _elem556 = iprot.readI64() + self.txn_ids.append(_elem556) iprot.readListEnd() else: iprot.skip(ftype) @@ -11978,8 +12407,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter529 in self.txn_ids: - oprot.writeI64(iter529) + for iter557 in self.txn_ids: + oprot.writeI64(iter557) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12049,11 +12478,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.writeEventInfos = [] - (_etype533, _size530) = iprot.readListBegin() - for _i534 in xrange(_size530): - _elem535 = WriteEventInfo() - _elem535.read(iprot) - self.writeEventInfos.append(_elem535) + (_etype561, _size558) = iprot.readListBegin() + for _i562 in xrange(_size558): + _elem563 = WriteEventInfo() + _elem563.read(iprot) + self.writeEventInfos.append(_elem563) iprot.readListEnd() else: iprot.skip(ftype) @@ -12078,8 +12507,8 @@ def write(self, oprot): if self.writeEventInfos is not None: oprot.writeFieldBegin('writeEventInfos', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.writeEventInfos)) - for iter536 in self.writeEventInfos: - iter536.write(oprot) + for iter564 in self.writeEventInfos: + iter564.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12326,10 +12755,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partNames = [] - (_etype540, _size537) = iprot.readListBegin() - for _i541 in xrange(_size537): - _elem542 = iprot.readString() - self.partNames.append(_elem542) + (_etype568, _size565) = iprot.readListBegin() + for _i569 in xrange(_size565): + _elem570 = iprot.readString() + self.partNames.append(_elem570) iprot.readListEnd() else: iprot.skip(ftype) @@ -12366,8 +12795,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter543 in self.partNames: - oprot.writeString(iter543) + for iter571 in self.partNames: + oprot.writeString(iter571) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12437,10 +12866,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fullTableNames = [] - (_etype547, _size544) = iprot.readListBegin() - for _i548 in xrange(_size544): - _elem549 = iprot.readString() - self.fullTableNames.append(_elem549) + (_etype575, _size572) = iprot.readListBegin() + for _i576 in xrange(_size572): + _elem577 = iprot.readString() + self.fullTableNames.append(_elem577) iprot.readListEnd() else: iprot.skip(ftype) @@ -12462,8 +12891,8 @@ def write(self, oprot): if self.fullTableNames is not None: oprot.writeFieldBegin('fullTableNames', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fullTableNames)) - for iter550 in self.fullTableNames: - oprot.writeString(iter550) + for iter578 in self.fullTableNames: + oprot.writeString(iter578) oprot.writeListEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -12546,10 +12975,10 @@ def 
read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.invalidWriteIds = [] - (_etype554, _size551) = iprot.readListBegin() - for _i555 in xrange(_size551): - _elem556 = iprot.readI64() - self.invalidWriteIds.append(_elem556) + (_etype582, _size579) = iprot.readListBegin() + for _i583 in xrange(_size579): + _elem584 = iprot.readI64() + self.invalidWriteIds.append(_elem584) iprot.readListEnd() else: iprot.skip(ftype) @@ -12584,8 +13013,8 @@ def write(self, oprot): if self.invalidWriteIds is not None: oprot.writeFieldBegin('invalidWriteIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.invalidWriteIds)) - for iter557 in self.invalidWriteIds: - oprot.writeI64(iter557) + for iter585 in self.invalidWriteIds: + oprot.writeI64(iter585) oprot.writeListEnd() oprot.writeFieldEnd() if self.minOpenWriteId is not None: @@ -12657,11 +13086,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tblValidWriteIds = [] - (_etype561, _size558) = iprot.readListBegin() - for _i562 in xrange(_size558): - _elem563 = TableValidWriteIds() - _elem563.read(iprot) - self.tblValidWriteIds.append(_elem563) + (_etype589, _size586) = iprot.readListBegin() + for _i590 in xrange(_size586): + _elem591 = TableValidWriteIds() + _elem591.read(iprot) + self.tblValidWriteIds.append(_elem591) iprot.readListEnd() else: iprot.skip(ftype) @@ -12678,8 +13107,8 @@ def write(self, oprot): if self.tblValidWriteIds is not None: oprot.writeFieldBegin('tblValidWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) - for iter564 in self.tblValidWriteIds: - iter564.write(oprot) + for iter592 in self.tblValidWriteIds: + iter592.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12755,10 +13184,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.txnIds = [] - (_etype568, _size565) = iprot.readListBegin() - for _i569 in xrange(_size565): - _elem570 = iprot.readI64() - self.txnIds.append(_elem570) + (_etype596, _size593) = iprot.readListBegin() + for _i597 in xrange(_size593): + _elem598 = iprot.readI64() + self.txnIds.append(_elem598) iprot.readListEnd() else: iprot.skip(ftype) @@ -12770,11 +13199,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.srcTxnToWriteIdList = [] - (_etype574, _size571) = iprot.readListBegin() - for _i575 in xrange(_size571): - _elem576 = TxnToWriteId() - _elem576.read(iprot) - self.srcTxnToWriteIdList.append(_elem576) + (_etype602, _size599) = iprot.readListBegin() + for _i603 in xrange(_size599): + _elem604 = TxnToWriteId() + _elem604.read(iprot) + self.srcTxnToWriteIdList.append(_elem604) iprot.readListEnd() else: iprot.skip(ftype) @@ -12799,8 +13228,8 @@ def write(self, oprot): if self.txnIds is not None: oprot.writeFieldBegin('txnIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.txnIds)) - for iter577 in self.txnIds: - oprot.writeI64(iter577) + for iter605 in self.txnIds: + oprot.writeI64(iter605) oprot.writeListEnd() oprot.writeFieldEnd() if self.replPolicy is not None: @@ -12810,8 +13239,8 @@ def write(self, oprot): if self.srcTxnToWriteIdList is not None: oprot.writeFieldBegin('srcTxnToWriteIdList', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.srcTxnToWriteIdList)) - for iter578 in self.srcTxnToWriteIdList: - iter578.write(oprot) + for iter606 in self.srcTxnToWriteIdList: + iter606.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12953,11 +13382,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: 
self.txnToWriteIds = [] - (_etype582, _size579) = iprot.readListBegin() - for _i583 in xrange(_size579): - _elem584 = TxnToWriteId() - _elem584.read(iprot) - self.txnToWriteIds.append(_elem584) + (_etype610, _size607) = iprot.readListBegin() + for _i611 in xrange(_size607): + _elem612 = TxnToWriteId() + _elem612.read(iprot) + self.txnToWriteIds.append(_elem612) iprot.readListEnd() else: iprot.skip(ftype) @@ -12974,8 +13403,8 @@ def write(self, oprot): if self.txnToWriteIds is not None: oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) - for iter585 in self.txnToWriteIds: - iter585.write(oprot) + for iter613 in self.txnToWriteIds: + iter613.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13203,11 +13632,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype589, _size586) = iprot.readListBegin() - for _i590 in xrange(_size586): - _elem591 = LockComponent() - _elem591.read(iprot) - self.component.append(_elem591) + (_etype617, _size614) = iprot.readListBegin() + for _i618 in xrange(_size614): + _elem619 = LockComponent() + _elem619.read(iprot) + self.component.append(_elem619) iprot.readListEnd() else: iprot.skip(ftype) @@ -13244,8 +13673,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter592 in self.component: - iter592.write(oprot) + for iter620 in self.component: + iter620.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -13943,11 +14372,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype596, _size593) = iprot.readListBegin() - for _i597 in xrange(_size593): - _elem598 = ShowLocksResponseElement() - _elem598.read(iprot) - self.locks.append(_elem598) + (_etype624, _size621) = iprot.readListBegin() + for _i625 in xrange(_size621): + _elem626 = ShowLocksResponseElement() + _elem626.read(iprot) + self.locks.append(_elem626) iprot.readListEnd() else: iprot.skip(ftype) @@ -13964,8 +14393,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter599 in self.locks: - iter599.write(oprot) + for iter627 in self.locks: + iter627.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14180,20 +14609,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype603, _size600) = iprot.readSetBegin() - for _i604 in xrange(_size600): - _elem605 = iprot.readI64() - self.aborted.add(_elem605) + (_etype631, _size628) = iprot.readSetBegin() + for _i632 in xrange(_size628): + _elem633 = iprot.readI64() + self.aborted.add(_elem633) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype609, _size606) = iprot.readSetBegin() - for _i610 in xrange(_size606): - _elem611 = iprot.readI64() - self.nosuch.add(_elem611) + (_etype637, _size634) = iprot.readSetBegin() + for _i638 in xrange(_size634): + _elem639 = iprot.readI64() + self.nosuch.add(_elem639) iprot.readSetEnd() else: iprot.skip(ftype) @@ -14210,15 +14639,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter612 in self.aborted: - oprot.writeI64(iter612) + for iter640 in self.aborted: + 
oprot.writeI64(iter640) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter613 in self.nosuch: - oprot.writeI64(iter613) + for iter641 in self.nosuch: + oprot.writeI64(iter641) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14315,11 +14744,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype615, _vtype616, _size614 ) = iprot.readMapBegin() - for _i618 in xrange(_size614): - _key619 = iprot.readString() - _val620 = iprot.readString() - self.properties[_key619] = _val620 + (_ktype643, _vtype644, _size642 ) = iprot.readMapBegin() + for _i646 in xrange(_size642): + _key647 = iprot.readString() + _val648 = iprot.readString() + self.properties[_key647] = _val648 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14356,9 +14785,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter621,viter622 in self.properties.items(): - oprot.writeString(kiter621) - oprot.writeString(viter622) + for kiter649,viter650 in self.properties.items(): + oprot.writeString(kiter649) + oprot.writeString(viter650) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14793,11 +15222,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype626, _size623) = iprot.readListBegin() - for _i627 in xrange(_size623): - _elem628 = ShowCompactResponseElement() - _elem628.read(iprot) - self.compacts.append(_elem628) + (_etype654, _size651) = iprot.readListBegin() + for _i655 in xrange(_size651): + _elem656 = ShowCompactResponseElement() + _elem656.read(iprot) + self.compacts.append(_elem656) iprot.readListEnd() else: iprot.skip(ftype) @@ -14814,8 +15243,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter629 in self.compacts: - iter629.write(oprot) + for iter657 in self.compacts: + iter657.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14904,10 +15333,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionnames = [] - (_etype633, _size630) = iprot.readListBegin() - for _i634 in xrange(_size630): - _elem635 = iprot.readString() - self.partitionnames.append(_elem635) + (_etype661, _size658) = iprot.readListBegin() + for _i662 in xrange(_size658): + _elem663 = iprot.readString() + self.partitionnames.append(_elem663) iprot.readListEnd() else: iprot.skip(ftype) @@ -14945,8 +15374,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter636 in self.partitionnames: - oprot.writeString(iter636) + for iter664 in self.partitionnames: + oprot.writeString(iter664) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -15179,10 +15608,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.SET: self.tablesUsed = set() - (_etype640, _size637) = iprot.readSetBegin() - for _i641 in xrange(_size637): - _elem642 = iprot.readString() - self.tablesUsed.add(_elem642) + (_etype668, _size665) = iprot.readSetBegin() + for _i669 in xrange(_size665): + _elem670 = iprot.readString() + self.tablesUsed.add(_elem670) iprot.readSetEnd() else: 
iprot.skip(ftype) @@ -15221,8 +15650,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 4) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter643 in self.tablesUsed: - oprot.writeString(iter643) + for iter671 in self.tablesUsed: + oprot.writeString(iter671) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -15539,11 +15968,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype647, _size644) = iprot.readListBegin() - for _i648 in xrange(_size644): - _elem649 = NotificationEvent() - _elem649.read(iprot) - self.events.append(_elem649) + (_etype675, _size672) = iprot.readListBegin() + for _i676 in xrange(_size672): + _elem677 = NotificationEvent() + _elem677.read(iprot) + self.events.append(_elem677) iprot.readListEnd() else: iprot.skip(ftype) @@ -15560,8 +15989,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter650 in self.events: - iter650.write(oprot) + for iter678 in self.events: + iter678.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15858,30 +16287,30 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype654, _size651) = iprot.readListBegin() - for _i655 in xrange(_size651): - _elem656 = iprot.readString() - self.filesAdded.append(_elem656) + (_etype682, _size679) = iprot.readListBegin() + for _i683 in xrange(_size679): + _elem684 = iprot.readString() + self.filesAdded.append(_elem684) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype660, _size657) = iprot.readListBegin() - for _i661 in xrange(_size657): - _elem662 = iprot.readString() - self.filesAddedChecksum.append(_elem662) + (_etype688, _size685) = iprot.readListBegin() + for _i689 in xrange(_size685): + _elem690 = iprot.readString() + self.filesAddedChecksum.append(_elem690) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.subDirectoryList = [] - (_etype666, _size663) = iprot.readListBegin() - for _i667 in xrange(_size663): - _elem668 = iprot.readString() - self.subDirectoryList.append(_elem668) + (_etype694, _size691) = iprot.readListBegin() + for _i695 in xrange(_size691): + _elem696 = iprot.readString() + self.subDirectoryList.append(_elem696) iprot.readListEnd() else: iprot.skip(ftype) @@ -15902,22 +16331,22 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter669 in self.filesAdded: - oprot.writeString(iter669) + for iter697 in self.filesAdded: + oprot.writeString(iter697) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter670 in self.filesAddedChecksum: - oprot.writeString(iter670) + for iter698 in self.filesAddedChecksum: + oprot.writeString(iter698) oprot.writeListEnd() oprot.writeFieldEnd() if self.subDirectoryList is not None: oprot.writeFieldBegin('subDirectoryList', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.subDirectoryList)) - for iter671 in self.subDirectoryList: - oprot.writeString(iter671) + for iter699 in self.subDirectoryList: + oprot.writeString(iter699) oprot.writeListEnd() 
oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16076,10 +16505,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype675, _size672) = iprot.readListBegin() - for _i676 in xrange(_size672): - _elem677 = iprot.readString() - self.partitionVals.append(_elem677) + (_etype703, _size700) = iprot.readListBegin() + for _i704 in xrange(_size700): + _elem705 = iprot.readString() + self.partitionVals.append(_elem705) iprot.readListEnd() else: iprot.skip(ftype) @@ -16117,8 +16546,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter678 in self.partitionVals: - oprot.writeString(iter678) + for iter706 in self.partitionVals: + oprot.writeString(iter706) oprot.writeListEnd() oprot.writeFieldEnd() if self.catName is not None: @@ -16270,10 +16699,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionVals = [] - (_etype682, _size679) = iprot.readListBegin() - for _i683 in xrange(_size679): - _elem684 = iprot.readString() - self.partitionVals.append(_elem684) + (_etype710, _size707) = iprot.readListBegin() + for _i711 in xrange(_size707): + _elem712 = iprot.readString() + self.partitionVals.append(_elem712) iprot.readListEnd() else: iprot.skip(ftype) @@ -16310,8 +16739,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter685 in self.partitionVals: - oprot.writeString(iter685) + for iter713 in self.partitionVals: + oprot.writeString(iter713) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16505,12 +16934,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype687, _vtype688, _size686 ) = iprot.readMapBegin() - for _i690 in xrange(_size686): - _key691 = iprot.readI64() - _val692 = MetadataPpdResult() - _val692.read(iprot) - self.metadata[_key691] = _val692 + (_ktype715, _vtype716, _size714 ) = iprot.readMapBegin() + for _i718 in xrange(_size714): + _key719 = iprot.readI64() + _val720 = MetadataPpdResult() + _val720.read(iprot) + self.metadata[_key719] = _val720 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16532,9 +16961,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter693,viter694 in self.metadata.items(): - oprot.writeI64(kiter693) - viter694.write(oprot) + for kiter721,viter722 in self.metadata.items(): + oprot.writeI64(kiter721) + viter722.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -16604,10 +17033,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype698, _size695) = iprot.readListBegin() - for _i699 in xrange(_size695): - _elem700 = iprot.readI64() - self.fileIds.append(_elem700) + (_etype726, _size723) = iprot.readListBegin() + for _i727 in xrange(_size723): + _elem728 = iprot.readI64() + self.fileIds.append(_elem728) iprot.readListEnd() else: iprot.skip(ftype) @@ -16639,8 +17068,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter701 in self.fileIds: - oprot.writeI64(iter701) + for iter729 in self.fileIds: + oprot.writeI64(iter729) oprot.writeListEnd() oprot.writeFieldEnd() 
if self.expr is not None: @@ -16714,11 +17143,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype703, _vtype704, _size702 ) = iprot.readMapBegin() - for _i706 in xrange(_size702): - _key707 = iprot.readI64() - _val708 = iprot.readString() - self.metadata[_key707] = _val708 + (_ktype731, _vtype732, _size730 ) = iprot.readMapBegin() + for _i734 in xrange(_size730): + _key735 = iprot.readI64() + _val736 = iprot.readString() + self.metadata[_key735] = _val736 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16740,9 +17169,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter709,viter710 in self.metadata.items(): - oprot.writeI64(kiter709) - oprot.writeString(viter710) + for kiter737,viter738 in self.metadata.items(): + oprot.writeI64(kiter737) + oprot.writeString(viter738) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -16803,10 +17232,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype714, _size711) = iprot.readListBegin() - for _i715 in xrange(_size711): - _elem716 = iprot.readI64() - self.fileIds.append(_elem716) + (_etype742, _size739) = iprot.readListBegin() + for _i743 in xrange(_size739): + _elem744 = iprot.readI64() + self.fileIds.append(_elem744) iprot.readListEnd() else: iprot.skip(ftype) @@ -16823,8 +17252,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter717 in self.fileIds: - oprot.writeI64(iter717) + for iter745 in self.fileIds: + oprot.writeI64(iter745) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16930,20 +17359,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype721, _size718) = iprot.readListBegin() - for _i722 in xrange(_size718): - _elem723 = iprot.readI64() - self.fileIds.append(_elem723) + (_etype749, _size746) = iprot.readListBegin() + for _i750 in xrange(_size746): + _elem751 = iprot.readI64() + self.fileIds.append(_elem751) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype727, _size724) = iprot.readListBegin() - for _i728 in xrange(_size724): - _elem729 = iprot.readString() - self.metadata.append(_elem729) + (_etype755, _size752) = iprot.readListBegin() + for _i756 in xrange(_size752): + _elem757 = iprot.readString() + self.metadata.append(_elem757) iprot.readListEnd() else: iprot.skip(ftype) @@ -16965,15 +17394,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter730 in self.fileIds: - oprot.writeI64(iter730) + for iter758 in self.fileIds: + oprot.writeI64(iter758) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter731 in self.metadata: - oprot.writeString(iter731) + for iter759 in self.metadata: + oprot.writeString(iter759) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -17081,10 +17510,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype735, _size732) = iprot.readListBegin() - for _i736 in xrange(_size732): - _elem737 = iprot.readI64() - self.fileIds.append(_elem737) + 
(_etype763, _size760) = iprot.readListBegin() + for _i764 in xrange(_size760): + _elem765 = iprot.readI64() + self.fileIds.append(_elem765) iprot.readListEnd() else: iprot.skip(ftype) @@ -17101,8 +17530,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter738 in self.fileIds: - oprot.writeI64(iter738) + for iter766 in self.fileIds: + oprot.writeI64(iter766) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17331,11 +17760,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype742, _size739) = iprot.readListBegin() - for _i743 in xrange(_size739): - _elem744 = Function() - _elem744.read(iprot) - self.functions.append(_elem744) + (_etype770, _size767) = iprot.readListBegin() + for _i771 in xrange(_size767): + _elem772 = Function() + _elem772.read(iprot) + self.functions.append(_elem772) iprot.readListEnd() else: iprot.skip(ftype) @@ -17352,8 +17781,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter745 in self.functions: - iter745.write(oprot) + for iter773 in self.functions: + iter773.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17405,10 +17834,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype749, _size746) = iprot.readListBegin() - for _i750 in xrange(_size746): - _elem751 = iprot.readI32() - self.values.append(_elem751) + (_etype777, _size774) = iprot.readListBegin() + for _i778 in xrange(_size774): + _elem779 = iprot.readI32() + self.values.append(_elem779) iprot.readListEnd() else: iprot.skip(ftype) @@ -17425,8 +17854,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter752 in self.values: - oprot.writeI32(iter752) + for iter780 in self.values: + oprot.writeI32(iter780) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17698,10 +18127,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype756, _size753) = iprot.readListBegin() - for _i757 in xrange(_size753): - _elem758 = iprot.readString() - self.tblNames.append(_elem758) + (_etype784, _size781) = iprot.readListBegin() + for _i785 in xrange(_size781): + _elem786 = iprot.readString() + self.tblNames.append(_elem786) iprot.readListEnd() else: iprot.skip(ftype) @@ -17733,8 +18162,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter759 in self.tblNames: - oprot.writeString(iter759) + for iter787 in self.tblNames: + oprot.writeString(iter787) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -17799,11 +18228,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype763, _size760) = iprot.readListBegin() - for _i764 in xrange(_size760): - _elem765 = Table() - _elem765.read(iprot) - self.tables.append(_elem765) + (_etype791, _size788) = iprot.readListBegin() + for _i792 in xrange(_size788): + _elem793 = Table() + _elem793.read(iprot) + self.tables.append(_elem793) iprot.readListEnd() else: iprot.skip(ftype) @@ -17820,8 +18249,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', 
TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter766 in self.tables: - iter766.write(oprot) + for iter794 in self.tables: + iter794.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19029,44 +19458,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype770, _size767) = iprot.readListBegin() - for _i771 in xrange(_size767): - _elem772 = WMPool() - _elem772.read(iprot) - self.pools.append(_elem772) + (_etype798, _size795) = iprot.readListBegin() + for _i799 in xrange(_size795): + _elem800 = WMPool() + _elem800.read(iprot) + self.pools.append(_elem800) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype776, _size773) = iprot.readListBegin() - for _i777 in xrange(_size773): - _elem778 = WMMapping() - _elem778.read(iprot) - self.mappings.append(_elem778) + (_etype804, _size801) = iprot.readListBegin() + for _i805 in xrange(_size801): + _elem806 = WMMapping() + _elem806.read(iprot) + self.mappings.append(_elem806) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype782, _size779) = iprot.readListBegin() - for _i783 in xrange(_size779): - _elem784 = WMTrigger() - _elem784.read(iprot) - self.triggers.append(_elem784) + (_etype810, _size807) = iprot.readListBegin() + for _i811 in xrange(_size807): + _elem812 = WMTrigger() + _elem812.read(iprot) + self.triggers.append(_elem812) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype788, _size785) = iprot.readListBegin() - for _i789 in xrange(_size785): - _elem790 = WMPoolTrigger() - _elem790.read(iprot) - self.poolTriggers.append(_elem790) + (_etype816, _size813) = iprot.readListBegin() + for _i817 in xrange(_size813): + _elem818 = WMPoolTrigger() + _elem818.read(iprot) + self.poolTriggers.append(_elem818) iprot.readListEnd() else: iprot.skip(ftype) @@ -19087,29 +19516,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter791 in self.pools: - iter791.write(oprot) + for iter819 in self.pools: + iter819.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter792 in self.mappings: - iter792.write(oprot) + for iter820 in self.mappings: + iter820.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter793 in self.triggers: - iter793.write(oprot) + for iter821 in self.triggers: + iter821.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter794 in self.poolTriggers: - iter794.write(oprot) + for iter822 in self.poolTriggers: + iter822.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19583,11 +20012,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype798, _size795) = iprot.readListBegin() - for _i799 in xrange(_size795): - _elem800 = WMResourcePlan() - _elem800.read(iprot) - self.resourcePlans.append(_elem800) + (_etype826, _size823) = iprot.readListBegin() 
+ for _i827 in xrange(_size823): + _elem828 = WMResourcePlan() + _elem828.read(iprot) + self.resourcePlans.append(_elem828) iprot.readListEnd() else: iprot.skip(ftype) @@ -19604,8 +20033,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter801 in self.resourcePlans: - iter801.write(oprot) + for iter829 in self.resourcePlans: + iter829.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19909,20 +20338,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype805, _size802) = iprot.readListBegin() - for _i806 in xrange(_size802): - _elem807 = iprot.readString() - self.errors.append(_elem807) + (_etype833, _size830) = iprot.readListBegin() + for _i834 in xrange(_size830): + _elem835 = iprot.readString() + self.errors.append(_elem835) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype811, _size808) = iprot.readListBegin() - for _i812 in xrange(_size808): - _elem813 = iprot.readString() - self.warnings.append(_elem813) + (_etype839, _size836) = iprot.readListBegin() + for _i840 in xrange(_size836): + _elem841 = iprot.readString() + self.warnings.append(_elem841) iprot.readListEnd() else: iprot.skip(ftype) @@ -19939,15 +20368,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter814 in self.errors: - oprot.writeString(iter814) + for iter842 in self.errors: + oprot.writeString(iter842) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter815 in self.warnings: - oprot.writeString(iter815) + for iter843 in self.warnings: + oprot.writeString(iter843) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20524,11 +20953,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype819, _size816) = iprot.readListBegin() - for _i820 in xrange(_size816): - _elem821 = WMTrigger() - _elem821.read(iprot) - self.triggers.append(_elem821) + (_etype847, _size844) = iprot.readListBegin() + for _i848 in xrange(_size844): + _elem849 = WMTrigger() + _elem849.read(iprot) + self.triggers.append(_elem849) iprot.readListEnd() else: iprot.skip(ftype) @@ -20545,8 +20974,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter822 in self.triggers: - iter822.write(oprot) + for iter850 in self.triggers: + iter850.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21730,11 +22159,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.cols = [] - (_etype826, _size823) = iprot.readListBegin() - for _i827 in xrange(_size823): - _elem828 = FieldSchema() - _elem828.read(iprot) - self.cols.append(_elem828) + (_etype854, _size851) = iprot.readListBegin() + for _i855 in xrange(_size851): + _elem856 = FieldSchema() + _elem856.read(iprot) + self.cols.append(_elem856) iprot.readListEnd() else: iprot.skip(ftype) @@ -21794,8 +22223,8 @@ def write(self, oprot): if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter829 in self.cols: - 
iter829.write(oprot) + for iter857 in self.cols: + iter857.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.state is not None: @@ -22050,11 +22479,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.schemaVersions = [] - (_etype833, _size830) = iprot.readListBegin() - for _i834 in xrange(_size830): - _elem835 = SchemaVersionDescriptor() - _elem835.read(iprot) - self.schemaVersions.append(_elem835) + (_etype861, _size858) = iprot.readListBegin() + for _i862 in xrange(_size858): + _elem863 = SchemaVersionDescriptor() + _elem863.read(iprot) + self.schemaVersions.append(_elem863) iprot.readListEnd() else: iprot.skip(ftype) @@ -22071,8 +22500,8 @@ def write(self, oprot): if self.schemaVersions is not None: oprot.writeFieldBegin('schemaVersions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions)) - for iter836 in self.schemaVersions: - iter836.write(oprot) + for iter864 in self.schemaVersions: + iter864.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22557,11 +22986,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitions = [] - (_etype840, _size837) = iprot.readListBegin() - for _i841 in xrange(_size837): - _elem842 = Partition() - _elem842.read(iprot) - self.partitions.append(_elem842) + (_etype868, _size865) = iprot.readListBegin() + for _i869 in xrange(_size865): + _elem870 = Partition() + _elem870.read(iprot) + self.partitions.append(_elem870) iprot.readListEnd() else: iprot.skip(ftype) @@ -22606,8 +23035,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter843 in self.partitions: - iter843.write(oprot) + for iter871 in self.partitions: + iter871.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environmentContext is not None: @@ -22759,10 +23188,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partVals = [] - (_etype847, _size844) = iprot.readListBegin() - for _i848 in xrange(_size844): - _elem849 = iprot.readString() - self.partVals.append(_elem849) + (_etype875, _size872) = iprot.readListBegin() + for _i876 in xrange(_size872): + _elem877 = iprot.readString() + self.partVals.append(_elem877) iprot.readListEnd() else: iprot.skip(ftype) @@ -22802,8 +23231,8 @@ def write(self, oprot): if self.partVals is not None: oprot.writeFieldBegin('partVals', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partVals)) - for iter850 in self.partVals: - oprot.writeString(iter850) + for iter878 in self.partVals: + oprot.writeString(iter878) oprot.writeListEnd() oprot.writeFieldEnd() if self.newPart is not None: diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index e0c6c02715dab4d9ad457ec710bcb3159206c6c6..a9fbf05d4ec74ea5823ef75f2da0158084eb6143 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -141,6 +141,14 @@ module SchemaVersionState VALID_VALUES = Set.new([INITIATED, START_REVIEW, CHANGES_REQUIRED, REVIEWED, ENABLED, DISABLED, ARCHIVED, DELETED]).freeze end +module PartitionFilterMode + BY_NAMES = 0 + BY_VALUES = 1 + BY_EXPR = 2 + VALUE_MAP = {0 => "BY_NAMES", 1 => "BY_VALUES", 2 => "BY_EXPR"} + VALID_VALUES = Set.new([BY_NAMES, BY_VALUES, 
BY_EXPR]).freeze +end + module FunctionType JAVA = 1 VALUE_MAP = {1 => "JAVA"} @@ -1176,6 +1184,91 @@ class Partition ::Thrift::Struct.generate_accessors self end +class GetPartitionsProjectSpec + include ::Thrift::Struct, ::Thrift::Struct_Union + FIELDLIST = 1 + PARAMKEYPATTERN = 2 + EXCLUDEPARAMKEYPATTERN = 3 + + FIELDS = { + FIELDLIST => {:type => ::Thrift::Types::LIST, :name => 'fieldList', :element => {:type => ::Thrift::Types::STRING}}, + PARAMKEYPATTERN => {:type => ::Thrift::Types::STRING, :name => 'paramKeyPattern'}, + EXCLUDEPARAMKEYPATTERN => {:type => ::Thrift::Types::BOOL, :name => 'excludeParamKeyPattern'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class GetPartitionsFilterSpec + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + WITHAUTH = 3 + USER = 4 + GROUPNAMES = 5 + FILTERMODE = 6 + FILTERS = 7 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, + WITHAUTH => {:type => ::Thrift::Types::BOOL, :name => 'withAuth', :optional => true}, + USER => {:type => ::Thrift::Types::STRING, :name => 'user', :optional => true}, + GROUPNAMES => {:type => ::Thrift::Types::LIST, :name => 'groupNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, + FILTERMODE => {:type => ::Thrift::Types::I32, :name => 'filterMode', :optional => true, :enum_class => ::PartitionFilterMode}, + FILTERS => {:type => ::Thrift::Types::LIST, :name => 'filters', :element => {:type => ::Thrift::Types::STRING}, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + unless @filterMode.nil? || ::PartitionFilterMode::VALID_VALUES.include?(@filterMode) + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field filterMode!') + end + end + + ::Thrift::Struct.generate_accessors self +end + +class GetPartitionsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + PARTITIONSPEC = 1 + + FIELDS = { + PARTITIONSPEC => {:type => ::Thrift::Types::LIST, :name => 'partitionSpec', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionSpec}} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class GetPartitionsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + PROJECTIONSPEC = 1 + FILTERSPEC = 2 + + FIELDS = { + PROJECTIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'projectionSpec', :class => ::GetPartitionsProjectSpec}, + FILTERSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'filterSpec', :class => ::GetPartitionsFilterSpec} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class PartitionWithoutSD include ::Thrift::Struct, ::Thrift::Struct_Union VALUES = 1 @@ -1191,7 +1284,7 @@ class PartitionWithoutSD LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'}, RELATIVEPATH => {:type => ::Thrift::Types::STRING, :name => 'relativePath'}, PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, - PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true} + PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet} } def struct_fields; FIELDS; end diff 
--git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 92424a43feefc8c0db7c91302045437f3afbf274..77a2e1e6b08e2beefa0b860f3cd934c2bcb654ac 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -3576,6 +3576,22 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_runtime_stats failed: unknown result') end + def get_partitions_with_specs(request) + send_get_partitions_with_specs(request) + return recv_get_partitions_with_specs() + end + + def send_get_partitions_with_specs(request) + send_message('get_partitions_with_specs', Get_partitions_with_specs_args, :request => request) + end + + def recv_get_partitions_with_specs() + result = receive_message(Get_partitions_with_specs_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_with_specs failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -6263,6 +6279,17 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_runtime_stats', seqid) end + def process_get_partitions_with_specs(seqid, iprot, oprot) + args = read_args(iprot, Get_partitions_with_specs_args) + result = Get_partitions_with_specs_result.new() + begin + result.success = @handler.get_partitions_with_specs(args.request) + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'get_partitions_with_specs', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -14186,5 +14213,39 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_partitions_with_specs_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::GetPartitionsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_partitions_with_specs_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetPartitionsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 8a4bdd8ed827572f5fd9d291c5454630d84284bd..ad5794e1d6cf17229f6d85d7c4ffe501ad2edade 100644 --- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -460,13 +460,44 @@ struct Partition { 11: optional bool isStatsCompliant } +struct GetPartitionsProjectSpec { + 1: list fieldList; // dot separated strings. Eg sd.location, serdeInfo.name. Empty list will mean all the fields + 2: string paramKeyPattern; //keys for the params to be either included or excluded. 
Can include _ or % wildcards + 3: bool excludeParamKeyPattern; //if true partition parameters keys which match paramKeyPattern will be excluded +} + +enum PartitionFilterMode { + BY_NAMES, //filter by names + BY_VALUES, //filter by values + BY_EXPR //filter by expression +} + +struct GetPartitionsFilterSpec { + 1: string dbName, + 2: string tblName, + 3: optional bool withAuth, + 4: optional string user, + 5: optional list groupNames, + 6: optional PartitionFilterMode filterMode, + 7: optional list filters //used as list of partitionNames or list of values or expressions depending on mode +} + +struct GetPartitionsResponse { + 1: list partitionSpec +} + +struct GetPartitionsRequest { + 1: GetPartitionsProjectSpec projectionSpec + 2: GetPartitionsFilterSpec filterSpec //TODO not yet implemented +} + struct PartitionWithoutSD { 1: list values // string value is converted to appropriate partition key type 2: i32 createTime, 3: i32 lastAccessTime, 4: string relativePath, 5: map parameters, - 6: optional PrincipalPrivilegeSet privileges + 6: PrincipalPrivilegeSet privileges } struct PartitionSpecWithSharedSD { @@ -2323,6 +2354,9 @@ service ThriftHiveMetastore extends fb303.FacebookService void add_runtime_stats(1: RuntimeStat stat) throws(1:MetaException o1) list get_runtime_stats(1: GetRuntimeStatsRequest rqst) throws(1:MetaException o1) + + //get_partitions with filter and projectspec + GetPartitionsResponse get_partitions_with_specs(1: GetPartitionsRequest request) throws(1:MetaException o1) } // * Note about the DDL_TIME: When creating or altering a table or a partition, diff --git standalone-metastore/metastore-common/src/test/resources/metastore-site.xml standalone-metastore/metastore-common/src/test/resources/metastore-site.xml new file mode 100644 index 0000000000000000000000000000000000000000..9fe837bee5ffa8b691e67f9d6c07314285b8c9b8 --- /dev/null +++ standalone-metastore/metastore-common/src/test/resources/metastore-site.xml @@ -0,0 +1,40 @@ + + + + + + metastore.thrift.uris + thrift://localhost:9083 + Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore. 
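
For orientation, the request/response objects defined in hive_metastore.thrift above are meant to be used together through the new IMetaStoreClient.getPartitionsWithSpecs call added later in this patch. A minimal client-side sketch in Java, assuming an already-connected IMetaStoreClient; the database name, table name, and parameter-key pattern are placeholders, and the projection uses only field paths that appear in this patch's field-to-column mapping (createTime, sd.location). The server side currently reads only dbName/tblName from the filter spec; the filter modes are marked as not yet implemented in the IDL.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectSpec;
import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.thrift.TException;

public class GetPartitionsWithSpecsExample {
  static GetPartitionsResponse fetch(IMetaStoreClient client) throws TException {
    // Project only the partition create time and storage location, and keep
    // parameters whose key matches the illustrative pattern "transient%".
    GetPartitionsProjectSpec projection = new GetPartitionsProjectSpec();
    projection.setFieldList(Arrays.asList("createTime", "sd.location"));
    projection.setParamKeyPattern("transient%");  // _ / % SQL-style wildcards
    projection.setExcludeParamKeyPattern(false);  // include matches rather than exclude them

    GetPartitionsFilterSpec filter = new GetPartitionsFilterSpec();
    filter.setDbName("db1");    // placeholder database
    filter.setTblName("tbl1");  // placeholder table

    GetPartitionsRequest request = new GetPartitionsRequest();
    request.setProjectionSpec(projection);
    request.setFilterSpec(filter);

    // New IMetaStoreClient method added by this patch.
    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    for (PartitionSpec spec : response.getPartitionSpec()) {
      System.out.println(spec.getDbName() + "." + spec.getTableName());
    }
    return response;
  }
}
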
+ + + hive.in.test + true + + + metastore.task.threads.always + org.apache.hadoop.hive.metastore.events.EventCleanerTask + + + metastore.expression.proxy + org.apache.hadoop.hive.metastore.DefaultPartitionExpressionProxy + + \ No newline at end of file diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 324035a8096a001d3fb170b8640805258d5e2cdd..2dbae2214d265366f586cf4aec3321152bc602b5 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -69,8 +69,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.Lists; -import com.google.common.collect.Multimaps; +import com.google.common.collect.Multimaps; import org.apache.commons.cli.OptionBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -4635,7 +4635,7 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int List partitions = get_partitions(db_name, tableName, (short) max_parts); if (is_partition_spec_grouping_enabled(table)) { - partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions); + partitionSpecs = MetaStoreUtils.get_partitionspecs_grouped_by_storage_descriptor(table, partitions); } else { PartitionSpec pSpec = new PartitionSpec(); @@ -4655,7 +4655,6 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int } private static class StorageDescriptorKey { - private final StorageDescriptor sd; StorageDescriptorKey(StorageDescriptor sd) { this.sd = sd; } @@ -4719,11 +4718,11 @@ public Boolean apply(Partition input) { ImmutableList partsWithinTableDir = partitionsWithinTableDirectory.get(true); for (Partition partition : partsWithinTableDir) { - PartitionWithoutSD partitionWithoutSD - = new PartitionWithoutSD( partition.getValues(), - partition.getCreateTime(), - partition.getLastAccessTime(), - partition.getSd().getLocation().substring(tablePath.length()), partition.getParameters()); + PartitionWithoutSD partitionWithoutSD = + new PartitionWithoutSD(partition.getValues(), partition.getCreateTime(), + partition.getLastAccessTime(), + partition.getSd().getLocation().substring(tablePath.length()), + partition.getParameters(), null); StorageDescriptorKey sdKey = new StorageDescriptorKey(partition.getSd()); if (!sdToPartList.containsKey(sdKey)) { @@ -4771,6 +4770,47 @@ private PartitionSpec getSharedSDPartSpec(Table table, StorageDescriptorKey sdKe return ret; } + @Override + public GetPartitionsResponse get_partitions_with_specs(GetPartitionsRequest request) + throws MetaException, TException { + GetPartitionsFilterSpec filterSpec = request.getFilterSpec(); + String[] parsedDbName = parseDbName(filterSpec.getDbName(), conf); + String tableName = filterSpec.getTblName(); + startTableFunction("get_partitions_with_specs", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tableName); + GetPartitionsResponse response = null; + Exception ex = null; + try { + List fieldList = null; + String paramkeyPattern = null; + boolean excludeParamKeyPattern = false; + if (request.isSetProjectionSpec()) { + GetPartitionsProjectSpec partitionsProjectSpec = request.getProjectionSpec(); + fieldList = 
partitionsProjectSpec.getFieldList(); + if (partitionsProjectSpec.isSetParamKeyPattern()) { + paramkeyPattern = partitionsProjectSpec.getParamKeyPattern(); + } + excludeParamKeyPattern = partitionsProjectSpec.isExcludeParamKeyPattern(); + } + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + //TODO currently disabling JDO in order to test using directSQL Need to create a test config + //to force use directSQL + List partitions = getMS() + .getPartitionSpecsByFilterAndProjection(table, true, false, fieldList, paramkeyPattern, + excludeParamKeyPattern); + List partitionSpecs = + MetaStoreUtils.get_partitionspecs_grouped_by_storage_descriptor(table, partitions); + response = new GetPartitionsResponse(); + response.setPartitionSpec(partitionSpecs); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_with_specs", response != null, ex, tableName); + } + return response; + } + private static boolean is_partition_spec_grouping_enabled(Table table) { Map parameters = table.getParameters(); @@ -6025,7 +6065,7 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts); if (is_partition_spec_grouping_enabled(table)) { - partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions); + partitionSpecs = MetaStoreUtils.get_partitionspecs_grouped_by_storage_descriptor(table, partitions); } else { PartitionSpec pSpec = new PartitionSpec(); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 5ae00af564b05ec2720a03072f94c8f8579378a7..71d9b7dcf950adecc67f253ccd93ebf7715aa2b2 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -3624,4 +3624,10 @@ public void addRuntimeStat(RuntimeStat stat) throws TException { req.setMaxCreateTime(maxCreateTime); return client.get_runtime_stats(req); } + + @Override + public GetPartitionsResponse getPartitionsWithSpecs(GetPartitionsRequest request) + throws TException { + return client.get_partitions_with_specs(request); + } } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 91405b9a334a4b031a5dc7f4a1757a3895bfb386..2d6d74391b381db279d60aed5f7c60b228a831cf 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -59,6 +59,8 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse; import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; import 
org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; @@ -3748,4 +3750,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam /** Reads runtime statistics. */ List getRuntimeStats(int maxWeight, int maxCreateTime) throws TException; + + GetPartitionsResponse getPartitionsWithSpecs(GetPartitionsRequest request) throws TException; + } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 571c789eddfd2b1a27c65c48bdc6dccfafaaf676..4811d1af1f70720865321b9df1d2ea67df55c6af 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -23,8 +23,8 @@ import static org.apache.commons.lang.StringUtils.repeat; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import java.net.URL; import java.sql.Blob; -import java.sql.Clob; import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; @@ -38,14 +38,13 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.stream.Collectors; import javax.jdo.PersistenceManager; import javax.jdo.Query; import javax.jdo.Transaction; import javax.jdo.datastore.JDOConnection; -import org.apache.commons.lang.BooleanUtils; +import com.google.common.collect.ImmutableMap; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats; @@ -135,6 +134,7 @@ */ private final boolean isCompatibleDatastore; private final boolean isAggregateStatsCacheEnabled; + private final ImmutableMap fieldnameToTableName; private AggregateStatsCache aggrStatsCache; @java.lang.annotation.Target(java.lang.annotation.ElementType.FIELD) @@ -166,11 +166,15 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche batchSize = DatabaseProduct.needsInBatching(dbType) ? 
1000 : NO_BATCHING; } this.batchSize = batchSize; + ImmutableMap.Builder fieldNameToTableNameBuilder = + new ImmutableMap.Builder<>(); for (java.lang.reflect.Field f : this.getClass().getDeclaredFields()) { if (f.getAnnotation(TableName.class) == null) continue; try { - f.set(this, getFullyQualifiedName(schema, f.getName())); + String value = getFullyQualifiedName(schema, f.getName()); + f.set(this, value); + fieldNameToTableNameBuilder.put(f.getName(), value); } catch (IllegalArgumentException | IllegalAccessException e) { throw new RuntimeException("Internal error, cannot set " + f.getName()); } @@ -187,6 +191,7 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche isCompatibleDatastore = false; } else { boolean isInTest = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); + URL confLocation = MetastoreConf.getMetastoreSiteURL(); isCompatibleDatastore = (!isInTest || ensureDbInit()) && runTestQuery(); if (isCompatibleDatastore) { LOG.debug("Using direct SQL, underlying DB is " + dbType); @@ -198,6 +203,27 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche if (isAggregateStatsCacheEnabled) { aggrStatsCache = AggregateStatsCache.getInstance(conf); } + + // now use the tableanames to create the mapping + // note that some of the optional single-valued fields are not present + fieldnameToTableName = + fieldNameToTableNameBuilder + .put("createTime", PARTITIONS + ".\"CREATE_TIME\"") + .put("lastAccessTime", PARTITIONS + ".\"LAST_ACCESS_TIME\"") + .put("writeId", PARTITIONS + ".\"WRITE_ID\"") + .put("sd.location", SDS + ".\"LOCATION\"") + .put("sd.inputFormat", SDS + ".\"INPUT_FORMAT\"") + .put("sd.outputFormat", SDS + ".\"OUTPUT_FORMAT\"") + .put("sd.storedAsSubDirectories", SDS + ".\"IS_STOREDASSUBDIRECTORIES\"") + .put("sd.compressed", SDS + ".\"IS_COMPRESSED\"") + .put("sd.numBuckets", SDS + ".\"NUM_BUCKETS\"") + .put("sd.serdeInfo.name", SERDES + ".\"NAME\"") + .put("sd.serdeInfo.serializationLib", SERDES + ".\"SLIB\"") + .put("PART_ID", PARTITIONS + ".\"PART_ID\"") + .put("SD_ID", SDS + ".\"SD_ID\"") + .put("SERDE_ID", SERDES + ".\"SERDE_ID\"") + .put("CD_ID", SDS + ".\"CD_ID\"") + .build(); } private static String getFullyQualifiedName(String schema, String tblName) { @@ -314,7 +340,7 @@ private void executeNoResult(final String queryText) throws SQLException { long start = doTrace ? System.nanoTime() : 0; statement = ((Connection)jdoConn.getNativeConnection()).createStatement(); statement.execute(queryText); - timingTrace(doTrace, queryText, start, doTrace ? System.nanoTime() : 0); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, doTrace ? 
System.nanoTime() : 0); } finally { if(statement != null){ statement.close(); @@ -355,7 +381,7 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ } Object[] dbline = sqlResult.get(0); - Long dbid = extractSqlLong(dbline[0]); + Long dbid = MetastoreDirectSqlUtils.extractSqlLong(dbline[0]); String queryTextDbParams = "select \"PARAM_KEY\", \"PARAM_VALUE\" " + " from " + DATABASE_PARAMS + " " @@ -369,7 +395,7 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ } Map dbParams = new HashMap(); - List sqlResult2 = ensureList(executeWithArray( + List sqlResult2 = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryDbParams, params, queryTextDbParams)); if (!sqlResult2.isEmpty()) { for (Object[] line : sqlResult2) { @@ -468,12 +494,12 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ @Override public List run(List input) throws MetaException { String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")"; - List partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName, + List partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName, filter, input, Collections.emptyList(), null); if (partitionIds.isEmpty()) { return Collections.emptyList(); // no partitions, bail early. } - return getPartitionsFromPartitionIds(catName, dbName, tblName, null, partitionIds); + return getPartitionsFromPartitionIds(catName, dbName, tblName, null, partitionIds, Collections.emptyList()); } }); } @@ -489,17 +515,40 @@ public Database getDatabase(String catName, String dbName) throws MetaException{ Boolean isViewTable = isViewTable(filter.table); String catName = filter.table.isSetCatName() ? filter.table.getCatName() : DEFAULT_CATALOG_NAME; - List partitionIds = getPartitionIdsViaSqlFilter(catName, + List partitionIds = getPartitionIdsViaSqlFilter(catName, filter.table.getDbName(), filter.table.getTableName(), filter.filter, filter.params, filter.joins, max); if (partitionIds.isEmpty()) { return Collections.emptyList(); // no partitions, bail early. } - return Batchable.runBatched(batchSize, partitionIds, new Batchable() { + return Batchable.runBatched(batchSize, partitionIds, new Batchable() { @Override - public List run(List input) throws MetaException { + public List run(List input) throws MetaException { return getPartitionsFromPartitionIds(catName, filter.table.getDbName(), - filter.table.getTableName(), isViewTable, input); + filter.table.getTableName(), isViewTable, input, Collections.emptyList()); + } + }); + } + + public List getPartitionSpecsUsingProjection(Table tbl, + final List partitionFields, final String paramKeys, final boolean excludeKeys) + throws MetaException { + final String tblName = tbl.getTableName(); + final String dbName = tbl.getDbName(); + final String catName = tbl.getCatName(); + //TODO add support for filter + List partitionIds = + getPartitionIdsViaSqlFilter(catName, dbName, tblName, null, Collections.emptyList(), + Collections.emptyList(), null); + if (partitionIds.isEmpty()) { + return Collections.emptyList(); + } + // Get full objects. For Oracle/etc. do it in batches. 
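
The "do it in batches" step that follows uses the same Batchable.runBatched pattern as the other fetch paths in this class: the partition-ID list is cut into chunks of at most batchSize (1000 when DatabaseProduct.needsInBatching is true, per the constructor above) so the generated IN (...) clauses stay within database limits, and the per-chunk results are concatenated. A self-contained sketch of just that idea, not the actual Batchable class:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

final class BatchedFetchSketch {
  // Illustrative stand-in for Batchable.runBatched: apply 'perBatch' to
  // sublists of at most 'batchSize' ids and concatenate the results.
  static <I, R> List<R> runBatched(int batchSize, List<I> ids,
      Function<List<I>, List<R>> perBatch) {
    if (batchSize <= 0) {
      // No batching: run once over the full list (the real code uses a
      // NO_BATCHING sentinel for this case).
      return perBatch.apply(ids);
    }
    List<R> result = new ArrayList<>();
    for (int from = 0; from < ids.size(); from += batchSize) {
      int to = Math.min(from + batchSize, ids.size());
      result.addAll(perBatch.apply(ids.subList(from, to)));
    }
    return result;
  }
}
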
+ return Batchable.runBatched(batchSize, partitionIds, new Batchable() { + @Override + public List run(List input) throws MetaException { + return getPartitionsUsingProjection(tbl, catName, dbName, tblName, input, partitionFields, + paramKeys, excludeKeys); } }); } @@ -531,17 +580,17 @@ public boolean generateSqlFilterForPushdown( */ public List getPartitions(String catName, String dbName, String tblName, Integer max) throws MetaException { - List partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, + List partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName, null, Collections.emptyList(), Collections.emptyList(), max); if (partitionIds.isEmpty()) { return Collections.emptyList(); // no partitions, bail early. } // Get full objects. For Oracle/etc. do it in batches. - List result = Batchable.runBatched(batchSize, partitionIds, new Batchable() { + List result = Batchable.runBatched(batchSize, partitionIds, new Batchable() { @Override - public List run(List input) throws MetaException { - return getPartitionsFromPartitionIds(catName, dbName, tblName, null, input); + public List run(List input) throws MetaException { + return getPartitionsFromPartitionIds(catName, dbName, tblName, null, input, Collections.emptyList()); } }); return result; @@ -583,7 +632,7 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw * @param max The maximum number of partitions to return. * @return List of partition objects. */ - private List getPartitionIdsViaSqlFilter( + private List getPartitionIdsViaSqlFilter( String catName, String dbName, String tblName, String sqlFilter, List paramsForFilter, List joinsForFilter, Integer max) throws MetaException { @@ -619,41 +668,56 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw } List sqlResult = executeWithArray(query, params, queryText); long queryTime = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, queryTime); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, queryTime); if (sqlResult.isEmpty()) { return Collections.emptyList(); // no partitions, bail early. } - List result = new ArrayList(sqlResult.size()); + List result = new ArrayList<>(sqlResult.size()); for (Object fields : sqlResult) { - result.add(extractSqlLong(fields)); + result.add(MetastoreDirectSqlUtils.extractSqlLong(fields)); } query.closeAll(); return result; } + private List getPartitionsUsingProjection(Table tbl, String catName, String dbName, + String tblName, List partIdList, List projectionFields, String paramKeys, + boolean excludeKeys) throws MetaException { + // check if table object has table type as view + Boolean isView = isViewTable(tbl); + if (isView == null) { + isView = isViewTable(catName, dbName, tblName); + } + PartitionProjectionEvaluator projectionEvaluator = + new PartitionProjectionEvaluator(pm, fieldnameToTableName, projectionFields, + convertMapNullsToEmptyStrings, isView, paramKeys, excludeKeys); + return projectionEvaluator.getPartitionsUsingProjectionList(tbl, partIdList); + } + /** Should be called with the list short enough to not trip up Oracle/etc. 
*/ private List getPartitionsFromPartitionIds(String catName, String dbName, String tblName, - Boolean isView, List partIdList) throws MetaException { + Boolean isView, List partIdList, List projectionFields) throws MetaException { + boolean doTrace = LOG.isDebugEnabled(); int idStringWidth = (int)Math.ceil(Math.log10(partIdList.size())) + 1; // 1 for comma int sbCapacity = partIdList.size() * idStringWidth; - - String partIds = getIdListForIn(partIdList); - // Get most of the fields for the IDs provided. // Assume db and table names are the same for all partition, as provided in arguments. + String partIds = MetaStoreUtils.getIdListForIn(partIdList); String queryText = - "select " + PARTITIONS + ".\"PART_ID\", " + SDS + ".\"SD_ID\", " + SDS + ".\"CD_ID\"," - + " " + SERDES + ".\"SERDE_ID\", " + PARTITIONS + ".\"CREATE_TIME\"," - + " " + PARTITIONS + ".\"LAST_ACCESS_TIME\", " + SDS + ".\"INPUT_FORMAT\", " + SDS + ".\"IS_COMPRESSED\"," - + " " + SDS + ".\"IS_STOREDASSUBDIRECTORIES\", " + SDS + ".\"LOCATION\", " + SDS + ".\"NUM_BUCKETS\"," - + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\", " + PARTITIONS - + ".\"WRITE_ID\"" + " from " + PARTITIONS + "" - + " left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + ".\"SD_ID\" " - + " left outer join " + SERDES + " on " + SDS + ".\"SERDE_ID\" = " + SERDES + ".\"SERDE_ID\" " - + "where \"PART_ID\" in (" + partIds + ") order by \"PART_NAME\" asc"; + "select " + PARTITIONS + ".\"PART_ID\", " + SDS + ".\"SD_ID\", " + SDS + ".\"CD_ID\"," + " " + + SERDES + ".\"SERDE_ID\", " + PARTITIONS + ".\"CREATE_TIME\"," + " " + PARTITIONS + + ".\"LAST_ACCESS_TIME\", " + SDS + ".\"INPUT_FORMAT\", " + SDS + ".\"IS_COMPRESSED\"," + + " " + SDS + ".\"IS_STOREDASSUBDIRECTORIES\", " + SDS + ".\"LOCATION\", " + SDS + + ".\"NUM_BUCKETS\"," + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + + SERDES + ".\"SLIB\", " + PARTITIONS + ".\"WRITE_ID\"" + " from " + PARTITIONS + "" + + " left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + + ".\"SD_ID\" " + " left outer join " + SERDES + " on " + SDS + ".\"SERDE_ID\" = " + + SERDES + ".\"SERDE_ID\" " + "where \"PART_ID\" in (" + partIds + + ") order by \"PART_NAME\" asc"; + long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); List sqlResult = executeWithArray(query, null, queryText); @@ -674,22 +738,14 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw tblName = tblName.toLowerCase(); dbName = dbName.toLowerCase(); catName = normalizeSpace(catName).toLowerCase(); + partitions.navigableKeySet(); for (Object[] fields : sqlResult) { // Here comes the ugly part... - long partitionId = extractSqlLong(fields[0]); - Long sdId = extractSqlLong(fields[1]); - Long colId = extractSqlLong(fields[2]); - Long serdeId = extractSqlLong(fields[3]); - // A partition must have at least sdId and serdeId set, or nothing set if it's a view. - if (sdId == null || serdeId == null) { - if (isView == null) { - isView = isViewTable(catName, dbName, tblName); - } - if ((sdId != null || colId != null || serdeId != null) || !isView) { - throw new MetaException("Unexpected null for one of the IDs, SD " + sdId + - ", serde " + serdeId + " for a " + (isView ? 
"" : "non-") + " view"); - } - } + long partitionId = MetastoreDirectSqlUtils.extractSqlLong(fields[0]); + Long sdId = MetastoreDirectSqlUtils.extractSqlLong(fields[1]); + Long colId = MetastoreDirectSqlUtils.extractSqlLong(fields[2]); + Long serdeId = MetastoreDirectSqlUtils.extractSqlLong(fields[3]); + isView = validateObjectIds(catName, dbName, tblName, isView, sdId, colId, serdeId); Partition part = new Partition(); orderedResult.add(part); @@ -699,9 +755,9 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw part.setCatName(catName); part.setDbName(dbName); part.setTableName(tblName); - if (fields[4] != null) part.setCreateTime(extractSqlInt(fields[4])); - if (fields[5] != null) part.setLastAccessTime(extractSqlInt(fields[5])); - Long writeId = extractSqlLong(fields[14]); + if (fields[4] != null) part.setCreateTime(MetastoreDirectSqlUtils.extractSqlInt(fields[4])); + if (fields[5] != null) part.setLastAccessTime(MetastoreDirectSqlUtils.extractSqlInt(fields[5])); + Long writeId = MetastoreDirectSqlUtils.extractSqlLong(fields[14]); if (writeId != null) { part.setWriteId(writeId); } @@ -724,12 +780,12 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw sd.setSkewedInfo(new SkewedInfo(new ArrayList(), new ArrayList>(), new HashMap, String>())); sd.setInputFormat((String)fields[6]); - Boolean tmpBoolean = extractSqlBoolean(fields[7]); + Boolean tmpBoolean = MetastoreDirectSqlUtils.extractSqlBoolean(fields[7]); if (tmpBoolean != null) sd.setCompressed(tmpBoolean); - tmpBoolean = extractSqlBoolean(fields[8]); + tmpBoolean = MetastoreDirectSqlUtils.extractSqlBoolean(fields[8]); if (tmpBoolean != null) sd.setStoredAsSubDirectories(tmpBoolean); sd.setLocation((String)fields[9]); - if (fields[10] != null) sd.setNumBuckets(extractSqlInt(fields[10])); + if (fields[10] != null) sd.setNumBuckets(MetastoreDirectSqlUtils.extractSqlInt(fields[10])); sd.setOutputFormat((String)fields[11]); sdSb.append(sdId).append(","); part.setSd(sd); @@ -760,30 +816,13 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw Deadline.checkTimeout(); } query.closeAll(); - timingTrace(doTrace, queryText, start, queryTime); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, queryTime); // Now get all the one-to-many things. Start with partitions. 
- queryText = "select \"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + PARTITION_PARAMS + "" - + " where \"PART_ID\" in (" + partIds + ") and \"PARAM_KEY\" is not null" - + " order by \"PART_ID\" asc"; - loopJoinOrderedResult(partitions, queryText, 0, new ApplyFunc() { - @Override - public void apply(Partition t, Object[] fields) { - t.putToParameters((String)fields[1], extractSqlClob(fields[2])); - }}); - // Perform conversion of null map values - for (Partition t : partitions.values()) { - t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); - } - - queryText = "select \"PART_ID\", \"PART_KEY_VAL\" from " + PARTITION_KEY_VALS + "" - + " where \"PART_ID\" in (" + partIds + ")" - + " order by \"PART_ID\" asc, \"INTEGER_IDX\" asc"; - loopJoinOrderedResult(partitions, queryText, 0, new ApplyFunc() { - @Override - public void apply(Partition t, Object[] fields) { - t.addToValues((String)fields[1]); - }}); + MetastoreDirectSqlUtils + .setPartitionParameters(PARTITION_PARAMS, convertMapNullsToEmptyStrings, pm, partIds, partitions); + + MetastoreDirectSqlUtils.setPartitionValues(PARTITION_KEY_VALS, pm, partIds, partitions); // Prepare IN (blah) lists for the following queries. Cut off the final ','s. if (sdSb.length() == 0) { @@ -796,164 +835,55 @@ public void apply(Partition t, Object[] fields) { String colIds = trimCommaList(colsSb); // Get all the stuff for SD. Don't do empty-list check - we expect partitions do have SDs. - queryText = "select \"SD_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SD_PARAMS + "" - + " where \"SD_ID\" in (" + sdIds + ") and \"PARAM_KEY\" is not null" - + " order by \"SD_ID\" asc"; - loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc() { - @Override - public void apply(StorageDescriptor t, Object[] fields) { - t.putToParameters((String)fields[1], extractSqlClob(fields[2])); - }}); - // Perform conversion of null map values - for (StorageDescriptor t : sds.values()) { - t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); - } - - queryText = "select \"SD_ID\", \"COLUMN_NAME\", " + SORT_COLS + ".\"ORDER\"" - + " from " + SORT_COLS + "" - + " where \"SD_ID\" in (" + sdIds + ")" - + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"; - loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc() { - @Override - public void apply(StorageDescriptor t, Object[] fields) { - if (fields[2] == null) return; - t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]))); - }}); - - queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from " + BUCKETING_COLS + "" - + " where \"SD_ID\" in (" + sdIds + ")" - + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"; - loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc() { - @Override - public void apply(StorageDescriptor t, Object[] fields) { - t.addToBucketCols((String)fields[1]); - }}); + MetastoreDirectSqlUtils.setSDParameters(SD_PARAMS, convertMapNullsToEmptyStrings, pm, sds, sdIds); + + MetastoreDirectSqlUtils.setSDSortCols(SORT_COLS, pm, sds, sdIds); + + MetastoreDirectSqlUtils.setSDBucketCols(BUCKETING_COLS, pm, sds, sdIds); // Skewed columns stuff. 
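
The skewed-column values handled next are the one tricky hydration case: the ordered left outer join can yield several consecutive rows per storage descriptor that share a STRING_LIST_ID, and those must be folded into one value list each before being added to SkewedInfo. A reduced sketch of that grouping step, ignoring the null-list rows produced by the outer join (which the removed code maps to an empty list):

import java.util.ArrayList;
import java.util.List;

final class SkewedValueGroupingSketch {
  // Groups consecutive (listId, value) rows into one list per listId, the way
  // the skewed-col-values loop builds SkewedInfo.skewedColValues from rows
  // ordered by SD_ID_OID and INTEGER_IDX.
  static List<List<String>> group(List<Object[]> orderedRows) {
    List<List<String>> grouped = new ArrayList<>();
    Long currentListId = null;
    List<String> currentList = null;
    for (Object[] row : orderedRows) {
      long listId = ((Number) row[0]).longValue();
      if (currentListId == null || listId != currentListId) {
        currentList = new ArrayList<>();
        currentListId = listId;
        grouped.add(currentList);
      }
      currentList.add((String) row[1]);
    }
    return grouped;
  }
}
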
- queryText = "select \"SD_ID\", \"SKEWED_COL_NAME\" from " + SKEWED_COL_NAMES + "" - + " where \"SD_ID\" in (" + sdIds + ")" - + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"; - boolean hasSkewedColumns = - loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc() { - @Override - public void apply(StorageDescriptor t, Object[] fields) { - if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo()); - t.getSkewedInfo().addToSkewedColNames((String)fields[1]); - }}) > 0; + boolean hasSkewedColumns = MetastoreDirectSqlUtils + .setSkewedColNames(SKEWED_COL_NAMES, pm, sds, sdIds); // Assume we don't need to fetch the rest of the skewed column data if we have no columns. if (hasSkewedColumns) { // We are skipping the SKEWED_STRING_LIST table here, as it seems to be totally useless. - queryText = - "select " + SKEWED_VALUES + ".\"SD_ID_OID\"," - + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\"," - + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" " - + "from " + SKEWED_VALUES + " " - + " left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_VALUES + "." - + "\"STRING_LIST_ID_EID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" " - + "where " + SKEWED_VALUES + ".\"SD_ID_OID\" in (" + sdIds + ") " - + " and " + SKEWED_VALUES + ".\"STRING_LIST_ID_EID\" is not null " - + " and " + SKEWED_VALUES + ".\"INTEGER_IDX\" >= 0 " - + "order by " + SKEWED_VALUES + ".\"SD_ID_OID\" asc, " + SKEWED_VALUES + ".\"INTEGER_IDX\" asc," - + " " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc"; - loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc() { - private Long currentListId; - private List currentList; - @Override - public void apply(StorageDescriptor t, Object[] fields) throws MetaException { - if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo()); - // Note that this is not a typical list accumulator - there's no call to finalize - // the last list. Instead we add list to SD first, as well as locally to add elements. - if (fields[1] == null) { - currentList = null; // left outer join produced a list with no values - currentListId = null; - t.getSkewedInfo().addToSkewedColValues(Collections.emptyList()); - } else { - long fieldsListId = extractSqlLong(fields[1]); - if (currentListId == null || fieldsListId != currentListId) { - currentList = new ArrayList(); - currentListId = fieldsListId; - t.getSkewedInfo().addToSkewedColValues(currentList); - } - currentList.add((String)fields[2]); - } - }}); + MetastoreDirectSqlUtils + .setSkewedColValues(SKEWED_STRING_LIST_VALUES, SKEWED_VALUES, pm, sds, sdIds); // We are skipping the SKEWED_STRING_LIST table here, as it seems to be totally useless. - queryText = - "select " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\"," - + " " + SKEWED_STRING_LIST_VALUES + ".STRING_LIST_ID," - + " " + SKEWED_COL_VALUE_LOC_MAP + ".\"LOCATION\"," - + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" " - + "from " + SKEWED_COL_VALUE_LOC_MAP + "" - + " left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_COL_VALUE_LOC_MAP + "." 
- + "\"STRING_LIST_ID_KID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" " - + "where " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" in (" + sdIds + ")" - + " and " + SKEWED_COL_VALUE_LOC_MAP + ".\"STRING_LIST_ID_KID\" is not null " - + "order by " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" asc," - + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" asc," - + " " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc"; - - loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc() { - private Long currentListId; - private List currentList; - @Override - public void apply(StorageDescriptor t, Object[] fields) throws MetaException { - if (!t.isSetSkewedInfo()) { - SkewedInfo skewedInfo = new SkewedInfo(); - skewedInfo.setSkewedColValueLocationMaps(new HashMap, String>()); - t.setSkewedInfo(skewedInfo); - } - Map, String> skewMap = t.getSkewedInfo().getSkewedColValueLocationMaps(); - // Note that this is not a typical list accumulator - there's no call to finalize - // the last list. Instead we add list to SD first, as well as locally to add elements. - if (fields[1] == null) { - currentList = new ArrayList(); // left outer join produced a list with no values - currentListId = null; - } else { - long fieldsListId = extractSqlLong(fields[1]); - if (currentListId == null || fieldsListId != currentListId) { - currentList = new ArrayList(); - currentListId = fieldsListId; - } else { - skewMap.remove(currentList); // value based compare.. remove first - } - currentList.add((String)fields[3]); - } - skewMap.put(currentList, (String)fields[2]); - }}); + MetastoreDirectSqlUtils + .setSkewedColLocationMaps(SKEWED_COL_VALUE_LOC_MAP, SKEWED_STRING_LIST_VALUES, pm, sds, sdIds); } // if (hasSkewedColumns) // Get FieldSchema stuff if any. if (!colss.isEmpty()) { // We are skipping the CDS table here, as it seems to be totally useless. - queryText = "select \"CD_ID\", \"COMMENT\", \"COLUMN_NAME\", \"TYPE_NAME\"" - + " from " + COLUMNS_V2 + " where \"CD_ID\" in (" + colIds + ")" - + " order by \"CD_ID\" asc, \"INTEGER_IDX\" asc"; - loopJoinOrderedResult(colss, queryText, 0, new ApplyFunc>() { - @Override - public void apply(List t, Object[] fields) { - t.add(new FieldSchema((String)fields[2], extractSqlClob(fields[3]), (String)fields[1])); - }}); + MetastoreDirectSqlUtils.setSDCols(COLUMNS_V2, pm, colss, colIds); } // Finally, get all the stuff for serdes - just the params. - queryText = "select \"SERDE_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SERDE_PARAMS + "" - + " where \"SERDE_ID\" in (" + serdeIds + ") and \"PARAM_KEY\" is not null" - + " order by \"SERDE_ID\" asc"; - loopJoinOrderedResult(serdes, queryText, 0, new ApplyFunc() { - @Override - public void apply(SerDeInfo t, Object[] fields) { - t.putToParameters((String)fields[1], extractSqlClob(fields[2])); - }}); - // Perform conversion of null map values - for (SerDeInfo t : serdes.values()) { - t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); - } + MetastoreDirectSqlUtils + .setSerdeParams(SERDE_PARAMS, convertMapNullsToEmptyStrings, pm, serdes, serdeIds); return orderedResult; } + private Boolean validateObjectIds(String catName, String dbName, String tblName, Boolean isView, + Long sdId, Long colId, Long serdeId) throws MetaException { + // A partition must have at least sdId and serdeId set, or nothing set if it's a view. 
+ if (sdId == null || serdeId == null) { + if (isView == null) { + isView = isViewTable(catName, dbName, tblName); + } + if ((sdId != null || colId != null || serdeId != null) || !isView) { + throw new MetaException("Unexpected null for one of the IDs, SD " + sdId + + ", serde " + serdeId + " for a " + (isView ? "" : "non-") + " view"); + } + } + return isView; + } + public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws MetaException { boolean doTrace = LOG.isDebugEnabled(); String catName = filter.table.getCatName().toLowerCase(); @@ -981,59 +911,12 @@ public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws Meta long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); query.setUnique(true); - int sqlResult = extractSqlInt(query.executeWithArray(params)); + int sqlResult = MetastoreDirectSqlUtils.extractSqlInt(query.executeWithArray(params)); long queryTime = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, queryTime); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, queryTime); return sqlResult; } - - private void timingTrace(boolean doTrace, String queryText, long start, long queryTime) { - if (!doTrace) return; - LOG.debug("Direct SQL query in " + (queryTime - start) / 1000000.0 + "ms + " + - (System.nanoTime() - queryTime) / 1000000.0 + "ms, the query is [" + queryText + "]"); - } - - static Long extractSqlLong(Object obj) throws MetaException { - if (obj == null) return null; - if (!(obj instanceof Number)) { - throw new MetaException("Expected numeric type but got " + obj.getClass().getName()); - } - return ((Number)obj).longValue(); - } - - /** - * Convert a boolean value returned from the RDBMS to a Java Boolean object. - * MySQL has booleans, but e.g. Derby uses 'Y'/'N' mapping. - * - * @param value - * column value from the database - * @return The Boolean value of the database column value, null if the column - * value is null - * @throws MetaException - * if the column value cannot be converted into a Boolean object - */ - private static Boolean extractSqlBoolean(Object value) throws MetaException { - if (value == null) { - return null; - } - if (value instanceof Boolean) { - return (Boolean)value; - } - if (value instanceof String) { - try { - return BooleanUtils.toBooleanObject((String) value, "Y", "N", null); - } catch (IllegalArgumentException iae) { - // NOOP - } - } - throw new MetaException("Cannot extract boolean from column value " + value); - } - - private int extractSqlInt(Object field) { - return ((Number)field).intValue(); - } - private String extractSqlString(Object value) { if (value == null) return null; return value.toString(); @@ -1048,21 +931,6 @@ static Double extractSqlDouble(Object obj) throws MetaException { return ((Number) obj).doubleValue(); } - private String extractSqlClob(Object value) { - if (value == null) return null; - try { - if (value instanceof Clob) { - // we trim the Clob value to a max length an int can hold - int maxLength = (((Clob)value).length() < Integer.MAX_VALUE - 2) ? 
(int)((Clob)value).length() : Integer.MAX_VALUE - 2; - return ((Clob)value).getSubString(1L, maxLength); - } else { - return value.toString(); - } - } catch (SQLException sqle) { - return null; - } - } - static byte[] extractSqlBlob(Object value) throws MetaException { if (value == null) return null; @@ -1087,18 +955,6 @@ else if (value instanceof byte[]) { } } - /** - * Helper method for preparing for "SOMETHING_ID in (...)" to use in future queries. - * @param objectIds the objectId collection - * @return The concatenated list - * @throws MetaException If the list contains wrong data - */ - private static String getIdListForIn(List objectIds) throws MetaException { - return objectIds.stream() - .map(i -> i.toString()) - .collect(Collectors.joining(",")); - } - private static String trimCommaList(StringBuilder sb) { if (sb.length() > 0) { sb.setLength(sb.length() - 1); @@ -1106,55 +962,6 @@ private static String trimCommaList(StringBuilder sb) { return sb.toString(); } - private abstract class ApplyFunc { - public abstract void apply(Target t, Object[] fields) throws MetaException; - } - - /** - * Merges applies the result of a PM SQL query into a tree of object. - * Essentially it's an object join. DN could do this for us, but it issues queries - * separately for every object, which is suboptimal. - * @param tree The object tree, by ID. - * @param queryText The query text. - * @param keyIndex Index of the Long column corresponding to the map ID in query result rows. - * @param func The function that is called on each (object,row) pair with the same id. - * @return the count of results returned from the query. - */ - private int loopJoinOrderedResult(TreeMap tree, - String queryText, int keyIndex, ApplyFunc func) throws MetaException { - boolean doTrace = LOG.isDebugEnabled(); - long start = doTrace ? System.nanoTime() : 0; - Query query = pm.newQuery("javax.jdo.query.SQL", queryText); - Object result = query.execute(); - long queryTime = doTrace ? System.nanoTime() : 0; - if (result == null) { - query.closeAll(); - return 0; - } - List list = ensureList(result); - Iterator iter = list.iterator(); - Object[] fields = null; - for (Map.Entry entry : tree.entrySet()) { - if (fields == null && !iter.hasNext()) break; - long id = entry.getKey(); - while (fields != null || iter.hasNext()) { - if (fields == null) { - fields = iter.next(); - } - long nestedId = extractSqlLong(fields[keyIndex]); - if (nestedId < id) throw new MetaException("Found entries for unknown ID " + nestedId); - if (nestedId > id) break; // fields belong to one of the next entries - func.apply(entry.getValue(), fields); - fields = null; - } - Deadline.checkTimeout(); - } - int rv = list.size(); - query.closeAll(); - timingTrace(doTrace, queryText, start, queryTime); - return rv; - } - private static class PartitionFilterGenerator extends TreeVisitor { private final Table table; private final FilterBuilder filterBuffer; @@ -1409,13 +1216,13 @@ public ColumnStatistics getTableStats(final String catName, final String dbName, long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); Object qResult = executeWithArray(query, params, queryText); - timingTrace(doTrace, queryText0 + "...)", start, (doTrace ? System.nanoTime() : 0)); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText0 + "...)", start, (doTrace ? 
System.nanoTime() : 0)); if (qResult == null) { query.closeAll(); return null; } addQueryAfterUse(query); - return ensureList(qResult); + return MetastoreDirectSqlUtils.ensureList(qResult); } }; List list = Batchable.runBatched(batchSize, colNames, b); @@ -1519,11 +1326,11 @@ private long partsFoundForPartitions( Object qResult = executeWithArray(query, prepareParams( catName, dbName, tableName, inputPartNames, inputColName), queryText); long end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); ForwardQueryResult fqr = (ForwardQueryResult) qResult; Iterator iter = fqr.iterator(); while (iter.hasNext()) { - if (extractSqlLong(iter.next()) == inputColName.size()) { + if (MetastoreDirectSqlUtils.extractSqlLong(iter.next()) == inputColName.size()) { partsFound++; } } @@ -1580,8 +1387,8 @@ private long partsFoundForPartitions( return colStatsForDB; } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); - List list = ensureList(qResult); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); + List list = MetastoreDirectSqlUtils.ensureList(qResult); for (Object[] row : list) { String tblName = (String) row[0]; String partName = (String) row[1]; @@ -1670,8 +1477,8 @@ private long partsFoundForPartitions( return Collections.emptyList(); } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); - List list = ensureList(qResult); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); + List list = MetastoreDirectSqlUtils.ensureList(qResult); List colStats = new ArrayList(list.size()); for (Object[] row : list) { colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner)); @@ -1695,14 +1502,14 @@ private long partsFoundForPartitions( qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), queryText); end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); if (qResult == null) { query.closeAll(); return Collections.emptyList(); } List noExtraColumnNames = new ArrayList(); Map extraColumnNameTypeParts = new HashMap(); - List list = ensureList(qResult); + List list = MetastoreDirectSqlUtils.ensureList(qResult); for (Object[] row : list) { String colName = (String) row[0]; String colType = (String) row[1]; @@ -1710,7 +1517,7 @@ private long partsFoundForPartitions( // count(\"PARTITION_NAME\")==partNames.size() // Or, extrapolation is not possible for this column if // count(\"PARTITION_NAME\")<2 - Long count = extractSqlLong(row[2]); + Long count = MetastoreDirectSqlUtils.extractSqlLong(row[2]); if (count == partNames.size() || count < 2) { noExtraColumnNames.add(colName); } else { @@ -1732,13 +1539,13 @@ private long partsFoundForPartitions( query.closeAll(); return Collections.emptyList(); } - list = ensureList(qResult); + list = MetastoreDirectSqlUtils.ensureList(qResult); for (Object[] row : list) { colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner)); Deadline.checkTimeout(); } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); query.closeAll(); } // Extrapolation is needed for extraColumnNames. 
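// (i.e. columns whose stats rows were found for only a subset of the requested partitions:
// sum-type stats are scaled up toward the full partition count, min/max are read from the
// smallest/largest per-partition values, and decimal averages reuse the AVG_* columns.)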
@@ -1765,7 +1572,7 @@ private long partsFoundForPartitions( query.closeAll(); return Collections.emptyList(); } - list = ensureList(qResult); + list = MetastoreDirectSqlUtils.ensureList(qResult); // see the indexes for colstats in IExtrapolatePartStatus Integer[] sumIndex = new Integer[] { 6, 10, 11, 15 }; for (Object[] row : list) { @@ -1778,7 +1585,7 @@ private long partsFoundForPartitions( Deadline.checkTimeout(); } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); query.closeAll(); for (Map.Entry entry : extraColumnNameTypeParts.entrySet()) { Object[] row = new Object[IExtrapolatePartStatus.colStatNames.length + 2]; @@ -1814,7 +1621,7 @@ private long partsFoundForPartitions( if (o == null) { row[2 + colStatIndex] = null; } else { - Long val = extractSqlLong(o); + Long val = MetastoreDirectSqlUtils.extractSqlLong(o); row[2 + colStatIndex] = val / sumVal * (partNames.size()); } } else if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Min @@ -1846,7 +1653,7 @@ private long partsFoundForPartitions( Object[] min = (Object[]) (fqr.get(0)); Object[] max = (Object[]) (fqr.get(fqr.size() - 1)); end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); query.closeAll(); if (min[0] == null || max[0] == null) { row[2 + colStatIndex] = null; @@ -1877,7 +1684,7 @@ private long partsFoundForPartitions( // "AVG_DECIMAL" row[2 + colStatIndex] = avg[colStatIndex - 12]; end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, queryText, start, end); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end); query.closeAll(); } } @@ -1952,13 +1759,13 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, Query query = pm.newQuery("javax.jdo.query.SQL", queryText); Object qResult = executeWithArray(query, prepareParams( catName, dbName, tableName, inputPartNames, inputColNames), queryText); - timingTrace(doTrace, queryText0, start, (doTrace ? System.nanoTime() : 0)); + MetastoreDirectSqlUtils.timingTrace(doTrace, queryText0, start, (doTrace ? System.nanoTime() : 0)); if (qResult == null) { query.closeAll(); return Collections.emptyList(); } addQueryAfterUse(query); - return ensureList(qResult); + return MetastoreDirectSqlUtils.ensureList(qResult); } }; try { @@ -2011,8 +1818,9 @@ private ColumnStatistics makeColumnStats( // LastAnalyzed is stored per column but thrift has it per several; // get the lowest for now as nobody actually uses this field. Object laObj = row[offset + 15]; - if (laObj != null && (!csd.isSetLastAnalyzed() || csd.getLastAnalyzed() > extractSqlLong(laObj))) { - csd.setLastAnalyzed(extractSqlLong(laObj)); + if (laObj != null && (!csd.isSetLastAnalyzed() || csd.getLastAnalyzed() > MetastoreDirectSqlUtils + .extractSqlLong(laObj))) { + csd.setLastAnalyzed(MetastoreDirectSqlUtils.extractSqlLong(laObj)); } csos.add(prepareCSObj(row, offset)); Deadline.checkTimeout(); @@ -2021,14 +1829,6 @@ private ColumnStatistics makeColumnStats( return result; } - @SuppressWarnings("unchecked") - private List ensureList(Object result) throws MetaException { - if (!(result instanceof List)) { - throw new MetaException("Wrong result type " + result.getClass()); - } - return (List)result; - } - private String makeParams(int size) { // W/ size 0, query will fail, but at least we'd get to see the query in debug output. 
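// e.g. makeParams(3) yields "?,?,?" (repeat(",?", 3) gives ",?,?,?" and the leading comma is
// stripped), ready to be embedded in an "in (...)" clause and bound via executeWithArray.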
return (size == 0) ? "" : repeat(",?", size).substring(1); @@ -2036,21 +1836,7 @@ private String makeParams(int size) { @SuppressWarnings("unchecked") private T executeWithArray(Query query, Object[] params, String sql) throws MetaException { - try { - return (T)((params == null) ? query.execute() : query.executeWithArray(params)); - } catch (Exception ex) { - String error = "Failed to execute [" + sql + "] with parameters ["; - if (params != null) { - boolean isFirst = true; - for (Object param : params) { - error += (isFirst ? "" : ", ") + param; - isFirst = false; - } - } - LOG.warn(error + "]", ex); - // We just logged an exception with (in case of JDO) a humongous callstack. Make a new one. - throw new MetaException("See previous errors; " + ex.getMessage()); - } + return MetastoreDirectSqlUtils.executeWithArray(query, params, sql); } /** @@ -2128,12 +1914,12 @@ public void prepareTxn() throws MetaException { } Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryParams, pms.toArray(), queryText)); if (!sqlResult.isEmpty()) { for (Object[] line : sqlResult) { - int enableValidateRely = extractSqlInt(line[11]); + int enableValidateRely = MetastoreDirectSqlUtils.extractSqlInt(line[11]); boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; @@ -2144,9 +1930,9 @@ public void prepareTxn() throws MetaException { extractSqlString(line[3]), extractSqlString(line[4]), extractSqlString(line[5]), - extractSqlInt(line[6]), - extractSqlInt(line[7]), - extractSqlInt(line[8]), + MetastoreDirectSqlUtils.extractSqlInt(line[6]), + MetastoreDirectSqlUtils.extractSqlInt(line[7]), + MetastoreDirectSqlUtils.extractSqlInt(line[8]), extractSqlString(line[9]), extractSqlString(line[10]), enable, @@ -2195,12 +1981,12 @@ public void prepareTxn() throws MetaException { } Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryParams, pms.toArray(), queryText)); if (!sqlResult.isEmpty()) { for (Object[] line : sqlResult) { - int enableValidateRely = extractSqlInt(line[5]); + int enableValidateRely = MetastoreDirectSqlUtils.extractSqlInt(line[5]); boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; @@ -2208,7 +1994,7 @@ public void prepareTxn() throws MetaException { extractSqlString(line[0]), extractSqlString(line[1]), extractSqlString(line[2]), - extractSqlInt(line[3]), extractSqlString(line[4]), + MetastoreDirectSqlUtils.extractSqlInt(line[3]), extractSqlString(line[4]), enable, validate, rely); @@ -2253,12 +2039,12 @@ public void prepareTxn() throws MetaException { } Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryParams, pms.toArray(), queryText)); if (!sqlResult.isEmpty()) { for (Object[] line : sqlResult) { - int enableValidateRely = extractSqlInt(line[5]); + int enableValidateRely = MetastoreDirectSqlUtils.extractSqlInt(line[5]); boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; @@ -2267,7 +2053,7 @@ public void prepareTxn() 
throws MetaException { extractSqlString(line[0]), extractSqlString(line[1]), extractSqlString(line[2]), - extractSqlInt(line[3]), extractSqlString(line[4]), + MetastoreDirectSqlUtils.extractSqlInt(line[3]), extractSqlString(line[4]), enable, validate, rely)); @@ -2310,12 +2096,12 @@ public void prepareTxn() throws MetaException { } Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryParams, pms.toArray(), queryText)); if (!sqlResult.isEmpty()) { for (Object[] line : sqlResult) { - int enableValidateRely = extractSqlInt(line[4]); + int enableValidateRely = MetastoreDirectSqlUtils.extractSqlInt(line[4]); boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; @@ -2371,12 +2157,12 @@ public void prepareTxn() throws MetaException { } Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryParams, pms.toArray(), queryText)); if (!sqlResult.isEmpty()) { for (Object[] line : sqlResult) { - int enableValidateRely = extractSqlInt(line[4]); + int enableValidateRely = MetastoreDirectSqlUtils.extractSqlInt(line[4]); boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; @@ -2434,12 +2220,12 @@ public void prepareTxn() throws MetaException { } Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( queryParams, pms.toArray(), queryText)); if (!sqlResult.isEmpty()) { for (Object[] line : sqlResult) { - int enableValidateRely = extractSqlInt(line[4]); + int enableValidateRely = MetastoreDirectSqlUtils.extractSqlInt(line[4]); boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; @@ -2479,7 +2265,7 @@ public void dropPartitionsViaSqlFilter(final String catName, final String dbName public List run(List input) throws MetaException { String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")"; // Get partition ids - List partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName, + List partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName, filter, input, Collections.emptyList(), null); if (partitionIds.isEmpty()) { return Collections.emptyList(); // no partitions, bail early. 
@@ -2497,12 +2283,13 @@ public void dropPartitionsViaSqlFilter(final String catName, final String dbName * @throws MetaException If there is an SQL exception during the execution it converted to * MetaException */ - private void dropPartitionsByPartitionIds(List partitionIdList) throws MetaException { + private void dropPartitionsByPartitionIds(List partitionIdList) throws MetaException { String queryText; if (partitionIdList.isEmpty()) { return; } - String partitionIds = getIdListForIn(partitionIdList); + + String partitionIds = MetaStoreUtils.getIdListForIn(partitionIdList); // Get the corresponding SD_ID-s, CD_ID-s, SERDE_ID-s queryText = @@ -2512,7 +2299,8 @@ private void dropPartitionsByPartitionIds(List partitionIdList) throws M + "WHERE " + PARTITIONS + ".\"PART_ID\" in (" + partitionIds + ")"; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray(query, null, queryText)); + List sqlResult = MetastoreDirectSqlUtils + .ensureList(executeWithArray(query, null, queryText)); List sdIdList = new ArrayList<>(partitionIdList.size()); List columnDescriptorIdList = new ArrayList<>(1); @@ -2520,12 +2308,12 @@ private void dropPartitionsByPartitionIds(List partitionIdList) throws M if (!sqlResult.isEmpty()) { for (Object[] fields : sqlResult) { - sdIdList.add(extractSqlLong(fields[0])); - Long colId = extractSqlLong(fields[1]); + sdIdList.add(MetastoreDirectSqlUtils.extractSqlLong(fields[0])); + Long colId = MetastoreDirectSqlUtils.extractSqlLong(fields[1]); if (!columnDescriptorIdList.contains(colId)) { columnDescriptorIdList.add(colId); } - serdeIdList.add(extractSqlLong(fields[2])); + serdeIdList.add(MetastoreDirectSqlUtils.extractSqlLong(fields[2])); } } query.closeAll(); @@ -2586,7 +2374,7 @@ private void dropStorageDescriptors(List storageDescriptorIdList) throws return; } String queryText; - String sdIds = getIdListForIn(storageDescriptorIdList); + String sdIds = MetaStoreUtils.getIdListForIn(storageDescriptorIdList); // Get the corresponding SKEWED_STRING_LIST_ID data queryText = @@ -2595,18 +2383,19 @@ private void dropStorageDescriptors(List storageDescriptorIdList) throws + "WHERE " + SKEWED_VALUES + ".\"SD_ID_OID\" in (" + sdIds + ")"; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray(query, null, queryText)); + List sqlResult = MetastoreDirectSqlUtils + .ensureList(executeWithArray(query, null, queryText)); List skewedStringListIdList = new ArrayList<>(0); if (!sqlResult.isEmpty()) { for (Object[] fields : sqlResult) { - skewedStringListIdList.add(extractSqlLong(fields[0])); + skewedStringListIdList.add(MetastoreDirectSqlUtils.extractSqlLong(fields[0])); } } query.closeAll(); - String skewedStringListIds = getIdListForIn(skewedStringListIdList); + String skewedStringListIds = MetaStoreUtils.getIdListForIn(skewedStringListIdList); try { // Drop the SD params @@ -2675,7 +2464,7 @@ private void dropSerdes(List serdeIdList) throws MetaException { if (serdeIdList.isEmpty()) { return; } - String serdeIds = getIdListForIn(serdeIdList); + String serdeIds = MetaStoreUtils.getIdListForIn(serdeIdList); try { // Drop the serde params @@ -2705,7 +2494,7 @@ private void dropDanglingColumnDescriptors(List columnDescriptorIdList) return; } String queryText; - String colIds = getIdListForIn(columnDescriptorIdList); + String colIds = MetaStoreUtils.getIdListForIn(columnDescriptorIdList); // Drop column descriptor, if no relation left queryText = @@ -2714,13 +2503,14 @@ private void 
dropDanglingColumnDescriptors(List columnDescriptorIdList) + "WHERE " + SDS + ".\"CD_ID\" in (" + colIds + ") " + "GROUP BY " + SDS + ".\"CD_ID\""; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); - List sqlResult = ensureList(executeWithArray(query, null, queryText)); + List sqlResult = MetastoreDirectSqlUtils + .ensureList(executeWithArray(query, null, queryText)); List danglingColumnDescriptorIdList = new ArrayList<>(columnDescriptorIdList.size()); if (!sqlResult.isEmpty()) { for (Object[] fields : sqlResult) { - if (extractSqlInt(fields[1]) == 0) { - danglingColumnDescriptorIdList.add(extractSqlLong(fields[0])); + if (MetastoreDirectSqlUtils.extractSqlInt(fields[1]) == 0) { + danglingColumnDescriptorIdList.add(MetastoreDirectSqlUtils.extractSqlLong(fields[0])); } } } @@ -2728,7 +2518,7 @@ private void dropDanglingColumnDescriptors(List columnDescriptorIdList) if (!danglingColumnDescriptorIdList.isEmpty()) { try { - String danglingCDIds = getIdListForIn(danglingColumnDescriptorIdList); + String danglingCDIds = MetaStoreUtils.getIdListForIn(danglingColumnDescriptorIdList); // Drop the columns_v2 queryText = "delete from " + COLUMNS_V2 + " where \"CD_ID\" in (" + danglingCDIds + ")"; @@ -2785,7 +2575,7 @@ private void dropDanglingColumnDescriptors(List columnDescriptorIdList) LOG.debug("Running {}", queryText); Query query = pm.newQuery("javax.jdo.query.SQL", queryText); try { - List sqlResult = ensureList(executeWithArray( + List sqlResult = MetastoreDirectSqlUtils.ensureList(executeWithArray( query, new Object[] { dbName, catName, tableName }, queryText)); Map> result = new HashMap<>(); String lastPartName = null; @@ -2826,7 +2616,8 @@ private void getStatsTableListResult( LOG.debug("Running {}", queryText); Query query = pm.newQuery("javax.jdo.query.SQL", queryText); try { - List sqlResult = ensureList(executeWithArray(query, STATS_TABLE_TYPES, queryText)); + List sqlResult = MetastoreDirectSqlUtils + .ensureList(executeWithArray(query, STATS_TABLE_TYPES, queryText)); for (Object[] line : sqlResult) { result.add(new org.apache.hadoop.hive.common.TableName( extractSqlString(line[2]), extractSqlString(line[1]), extractSqlString(line[0]))); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDirectSqlUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDirectSqlUtils.java new file mode 100644 index 0000000000000000000000000000000000000000..aae27eb0243d4339509e90a0aa4d0070107bc7b4 --- /dev/null +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDirectSqlUtils.java @@ -0,0 +1,528 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import com.google.common.base.Joiner; +import org.apache.commons.lang.BooleanUtils; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jdo.PersistenceManager; +import javax.jdo.Query; +import java.sql.Clob; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +class MetastoreDirectSqlUtils { + private static final Logger LOG = LoggerFactory.getLogger(MetastoreDirectSqlUtils.class); + private MetastoreDirectSqlUtils() { + + } + @SuppressWarnings("unchecked") + static T executeWithArray(Query query, Object[] params, String sql) throws MetaException { + try { + return (T)((params == null) ? query.execute() : query.executeWithArray(params)); + } catch (Exception ex) { + String error = "Failed to execute [" + sql + "] with parameters ["; + if (params != null) { + boolean isFirst = true; + for (Object param : params) { + error += (isFirst ? "" : ", ") + param; + isFirst = false; + } + } + LOG.warn(error + "]", ex); + // We just logged an exception with (in case of JDO) a humongous callstack. Make a new one. + throw new MetaException("See previous errors; " + ex.getMessage()); + } + } + + @SuppressWarnings("unchecked") + static List ensureList(Object result) throws MetaException { + if (!(result instanceof List)) { + throw new MetaException("Wrong result type " + result.getClass()); + } + return (List)result; + } + + static Long extractSqlLong(Object obj) throws MetaException { + if (obj == null) return null; + if (!(obj instanceof Number)) { + throw new MetaException("Expected numeric type but got " + obj.getClass().getName()); + } + return ((Number)obj).longValue(); + } + + static void timingTrace(boolean doTrace, String queryText, long start, long queryTime) { + if (!doTrace) return; + LOG.debug("Direct SQL query in " + (queryTime - start) / 1000000.0 + "ms + " + + (System.nanoTime() - queryTime) / 1000000.0 + "ms, the query is [" + queryText + "]"); + } + + static int loopJoinOrderedResult(PersistenceManager pm, TreeMap tree, + String queryText, int keyIndex, ApplyFunc func) throws MetaException { + return loopJoinOrderedResult(pm, tree, queryText, null, keyIndex, func); + } + /** + * Merges applies the result of a PM SQL query into a tree of object. + * Essentially it's an object join. DN could do this for us, but it issues queries + * separately for every object, which is suboptimal. + * @param pm + * @param tree The object tree, by ID. + * @param queryText The query text. + * @param keyIndex Index of the Long column corresponding to the map ID in query result rows. + * @param func The function that is called on each (object,row) pair with the same id. + * @return the count of results returned from the query. 
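+ * @param parameters optional positional SQL parameters bound via executeWithArray; when null or
+ *                    empty the query is executed without parameter binding.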
+ */ + static int loopJoinOrderedResult(PersistenceManager pm, TreeMap tree, + String queryText, Object[] parameters, int keyIndex, ApplyFunc func) throws MetaException { + boolean doTrace = LOG.isDebugEnabled(); + long start = doTrace ? System.nanoTime() : 0; + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + Object result = null; + if (parameters == null || parameters.length == 0) { + result = query.execute(); + } else { + result = query.executeWithArray(parameters); + } + long queryTime = doTrace ? System.nanoTime() : 0; + if (result == null) { + query.closeAll(); + return 0; + } + List list = ensureList(result); + Iterator iter = list.iterator(); + Object[] fields = null; + for (Map.Entry entry : tree.entrySet()) { + if (fields == null && !iter.hasNext()) break; + long id = entry.getKey(); + while (fields != null || iter.hasNext()) { + if (fields == null) { + fields = iter.next(); + } + long nestedId = extractSqlLong(fields[keyIndex]); + if (nestedId < id) throw new MetaException("Found entries for unknown ID " + nestedId); + if (nestedId > id) break; // fields belong to one of the next entries + func.apply(entry.getValue(), fields); + fields = null; + } + Deadline.checkTimeout(); + } + int rv = list.size(); + query.closeAll(); + timingTrace(doTrace, queryText, start, queryTime); + return rv; + } + + static void setPartitionParameters(String PARTITION_PARAMS, boolean convertMapNullsToEmptyStrings, + PersistenceManager pm, String partIds, TreeMap partitions) + throws MetaException { + String queryText; + queryText = "select \"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + PARTITION_PARAMS + "" + + " where \"PART_ID\" in (" + partIds + ") and \"PARAM_KEY\" is not null" + + " order by \"PART_ID\" asc"; + loopJoinOrderedResult(pm, partitions, queryText, 0, new ApplyFunc() { + @Override + public void apply(Partition t, Object[] fields) { + t.putToParameters((String)fields[1], (String)fields[2]); + }}); + // Perform conversion of null map values + for (Partition t : partitions.values()) { + t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); + } + } + + static void setPartitionParametersWithFilter(String PARTITION_PARAMS, + boolean convertMapNullsToEmptyStrings, PersistenceManager pm, String partIds, + TreeMap partitions, String paramKeyPattern, boolean excludeFlag) + throws MetaException { + StringBuilder queryTextBuilder = new StringBuilder("select \"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from ") + .append(PARTITION_PARAMS) + .append(" where \"PART_ID\" in (") + .append(partIds) + .append(") and \"PARAM_KEY\" is not null"); + Object[] queryParams; + if (paramKeyPattern != null && !paramKeyPattern.isEmpty()) { + queryTextBuilder.append(" and \"PARAM_KEY\" "); + queryTextBuilder.append((excludeFlag ? 
"NOT" : "") + " LIKE (?)"); + queryParams = new Object[] {paramKeyPattern}; + } else { + queryParams = null; + } + queryTextBuilder.append(" order by \"PART_ID\" asc"); + String queryText = queryTextBuilder.toString(); + loopJoinOrderedResult(pm, partitions, queryText, queryParams, 0, new ApplyFunc() { + @Override + public void apply(Partition t, Object[] fields) { + t.putToParameters((String) fields[1], (String) fields[2]); + } + }); + // Perform conversion of null map values + for (Partition t : partitions.values()) { + t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); + } + } + + static void setPartitionValues(String PARTITION_KEY_VALS, PersistenceManager pm, String partIds, + TreeMap partitions) + throws MetaException { + String queryText; + queryText = "select \"PART_ID\", \"PART_KEY_VAL\" from " + PARTITION_KEY_VALS + "" + + " where \"PART_ID\" in (" + partIds + ")" + + " order by \"PART_ID\" asc, \"INTEGER_IDX\" asc"; + loopJoinOrderedResult(pm, partitions, queryText, 0, new ApplyFunc() { + @Override + public void apply(Partition t, Object[] fields) { + t.addToValues((String)fields[1]); + }}); + } + + static String extractSqlClob(Object value) { + if (value == null) return null; + try { + if (value instanceof Clob) { + // we trim the Clob value to a max length an int can hold + int maxLength = (((Clob)value).length() < Integer.MAX_VALUE - 2) ? (int)((Clob)value).length() : Integer.MAX_VALUE - 2; + return ((Clob)value).getSubString(1L, maxLength); + } else { + return value.toString(); + } + } catch (SQLException sqle) { + return null; + } + } + + static void setSDParameters(String SD_PARAMS, boolean convertMapNullsToEmptyStrings, + PersistenceManager pm, TreeMap sds, String sdIds) + throws MetaException { + String queryText; + queryText = "select \"SD_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SD_PARAMS + "" + + " where \"SD_ID\" in (" + sdIds + ") and \"PARAM_KEY\" is not null" + + " order by \"SD_ID\" asc"; + loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + @Override + public void apply(StorageDescriptor t, Object[] fields) { + t.putToParameters((String)fields[1], extractSqlClob(fields[2])); + }}); + // Perform conversion of null map values + for (StorageDescriptor t : sds.values()) { + t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); + } + } + + static int extractSqlInt(Object field) { + return ((Number)field).intValue(); + } + + static void setSDSortCols(String SORT_COLS, List columnNames, PersistenceManager pm, + TreeMap sds, String sdIds) + throws MetaException { + StringBuilder queryTextBuilder = new StringBuilder("select \"SD_ID\""); + int counter = 0; + if (columnNames.contains("col")) { + counter++; + queryTextBuilder.append(", \"COLUMN_NAME\""); + } + if (columnNames.contains("order")) { + counter++; + queryTextBuilder.append(", \"ORDER\""); + } + queryTextBuilder + .append(" from ") + .append(SORT_COLS) + .append(" where \"SD_ID\" in (") + .append(sdIds) + .append(") order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"); + String queryText = queryTextBuilder.toString(); + final int finalCounter = counter; + loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + @Override + public void apply(StorageDescriptor t, Object[] fields) { + if (finalCounter > 1 && fields[2] == null) { + return; + } + Order order = new Order(); + if (finalCounter > 0) { + order.setCol((String) fields[1]); + } + if (finalCounter > 1) { + 
order.setOrder(extractSqlInt(fields[2])); + } + t.addToSortCols(order); + }}); + } + + static void setSDSortCols(String SORT_COLS, PersistenceManager pm, + TreeMap sds, String sdIds) + throws MetaException { + String queryText; + queryText = "select \"SD_ID\", \"COLUMN_NAME\", " + SORT_COLS + ".\"ORDER\"" + + " from " + SORT_COLS + "" + + " where \"SD_ID\" in (" + sdIds + ")" + + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"; + loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + @Override + public void apply(StorageDescriptor t, Object[] fields) { + if (fields[2] == null) return; + t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]))); + }}); + } + + static void setSDBucketCols(String BUCKETING_COLS, PersistenceManager pm, + TreeMap sds, String sdIds) + throws MetaException { + String queryText; + queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from " + BUCKETING_COLS + "" + + " where \"SD_ID\" in (" + sdIds + ")" + + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"; + loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + @Override + public void apply(StorageDescriptor t, Object[] fields) { + t.addToBucketCols((String)fields[1]); + }}); + } + + static boolean setSkewedColNames(String SKEWED_COL_NAMES, PersistenceManager pm, + TreeMap sds, String sdIds) + throws MetaException { + String queryText; + queryText = "select \"SD_ID\", \"SKEWED_COL_NAME\" from " + SKEWED_COL_NAMES + "" + + " where \"SD_ID\" in (" + sdIds + ")" + + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc"; + return loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + @Override + public void apply(StorageDescriptor t, Object[] fields) { + if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo()); + t.getSkewedInfo().addToSkewedColNames((String)fields[1]); + }}) > 0; + } + + static void setSkewedColValues(String SKEWED_STRING_LIST_VALUES, String SKEWED_VALUES, + PersistenceManager pm, TreeMap sds, String sdIds) + throws MetaException { + String queryText; + queryText = + "select " + SKEWED_VALUES + ".\"SD_ID_OID\"," + + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\"," + + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" " + + "from " + SKEWED_VALUES + " " + + " left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_VALUES + "." + + "\"STRING_LIST_ID_EID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" " + + "where " + SKEWED_VALUES + ".\"SD_ID_OID\" in (" + sdIds + ") " + + " and " + SKEWED_VALUES + ".\"STRING_LIST_ID_EID\" is not null " + + " and " + SKEWED_VALUES + ".\"INTEGER_IDX\" >= 0 " + + "order by " + SKEWED_VALUES + ".\"SD_ID_OID\" asc, " + SKEWED_VALUES + ".\"INTEGER_IDX\" asc," + + " " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc"; + loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + private Long currentListId; + private List currentList; + @Override + public void apply(StorageDescriptor t, Object[] fields) throws MetaException { + if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo()); + // Note that this is not a typical list accumulator - there's no call to finalize + // the last list. Instead we add list to SD first, as well as locally to add elements. 
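+ // Rows are ordered by SD_ID_OID and INTEGER_IDX, so a change in STRING_LIST_ID marks the start
+ // of a new skewed-values list; a null STRING_LIST_ID (the left outer join found no values) is
+ // recorded as an empty list.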
+ if (fields[1] == null) { + currentList = null; // left outer join produced a list with no values + currentListId = null; + t.getSkewedInfo().addToSkewedColValues(Collections.emptyList()); + } else { + long fieldsListId = extractSqlLong(fields[1]); + if (currentListId == null || fieldsListId != currentListId) { + currentList = new ArrayList(); + currentListId = fieldsListId; + t.getSkewedInfo().addToSkewedColValues(currentList); + } + currentList.add((String)fields[2]); + } + }}); + } + + static void setSkewedColLocationMaps(String SKEWED_COL_VALUE_LOC_MAP, + String SKEWED_STRING_LIST_VALUES, PersistenceManager pm, TreeMap sds, + String sdIds) + throws MetaException { + String queryText; + queryText = + "select " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\"," + + " " + SKEWED_STRING_LIST_VALUES + ".STRING_LIST_ID," + + " " + SKEWED_COL_VALUE_LOC_MAP + ".\"LOCATION\"," + + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" " + + "from " + SKEWED_COL_VALUE_LOC_MAP + "" + + " left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_COL_VALUE_LOC_MAP + "." + + "\"STRING_LIST_ID_KID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" " + + "where " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" in (" + sdIds + ")" + + " and " + SKEWED_COL_VALUE_LOC_MAP + ".\"STRING_LIST_ID_KID\" is not null " + + "order by " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" asc," + + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" asc," + + " " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc"; + + loopJoinOrderedResult(pm, sds, queryText, 0, new ApplyFunc() { + private Long currentListId; + private List currentList; + @Override + public void apply(StorageDescriptor t, Object[] fields) throws MetaException { + if (!t.isSetSkewedInfo()) { + SkewedInfo skewedInfo = new SkewedInfo(); + skewedInfo.setSkewedColValueLocationMaps(new HashMap, String>()); + t.setSkewedInfo(skewedInfo); + } + Map, String> skewMap = t.getSkewedInfo().getSkewedColValueLocationMaps(); + // Note that this is not a typical list accumulator - there's no call to finalize + // the last list. Instead we add list to SD first, as well as locally to add elements. + if (fields[1] == null) { + currentList = new ArrayList(); // left outer join produced a list with no values + currentListId = null; + } else { + long fieldsListId = extractSqlLong(fields[1]); + if (currentListId == null || fieldsListId != currentListId) { + currentList = new ArrayList(); + currentListId = fieldsListId; + } else { + skewMap.remove(currentList); // value based compare.. 
remove first + } + currentList.add((String)fields[3]); + } + skewMap.put(currentList, (String)fields[2]); + }}); + } + + static void setSDCols(String COLUMNS_V2, List columnNames, PersistenceManager pm, + TreeMap> colss, String colIds) + throws MetaException { + StringBuilder queryTextBuilder = new StringBuilder("select \"CD_ID\""); + int counter = 0; + if (columnNames.contains("name")) { + counter++; + queryTextBuilder.append(", \"COLUMN_NAME\""); + } + if (columnNames.contains("type")) { + counter++; + queryTextBuilder.append(", \"TYPE_NAME\""); + } + if (columnNames.contains("comment")) { + counter++; + queryTextBuilder.append(", \"COMMENT\""); + } + queryTextBuilder + .append(" from ") + .append(COLUMNS_V2) + .append(" where \"CD_ID\" in (") + .append(colIds) + .append(") order by \"CD_ID\" asc, \"INTEGER_IDX\" asc"); + String queryText = queryTextBuilder.toString(); + int finalCounter = counter; + loopJoinOrderedResult(pm, colss, queryText, 0, new ApplyFunc>() { + @Override + public void apply(List t, Object[] fields) { + FieldSchema fieldSchema = new FieldSchema(); + if (finalCounter > 0) { + fieldSchema.setName((String) fields[1]); + } + if (finalCounter > 1) { + fieldSchema.setType(extractSqlClob(fields[2])); + } + if (finalCounter > 2) { + fieldSchema.setComment((String) fields[3]); + } + t.add(fieldSchema); + }}); + } + + static void setSDCols(String COLUMNS_V2, PersistenceManager pm, + TreeMap> colss, String colIds) + throws MetaException { + String queryText; + queryText = "select \"CD_ID\", \"COMMENT\", \"COLUMN_NAME\", \"TYPE_NAME\"" + + " from " + COLUMNS_V2 + " where \"CD_ID\" in (" + colIds + ")" + + " order by \"CD_ID\" asc, \"INTEGER_IDX\" asc"; + loopJoinOrderedResult(pm, colss, queryText, 0, new ApplyFunc>() { + @Override + public void apply(List t, Object[] fields) { + t.add(new FieldSchema((String)fields[2], extractSqlClob(fields[3]), (String)fields[1])); + }}); + } + + static void setSerdeParams(String SERDE_PARAMS, boolean convertMapNullsToEmptyStrings, + PersistenceManager pm, TreeMap serdes, String serdeIds) throws MetaException { + String queryText; + queryText = "select \"SERDE_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SERDE_PARAMS + "" + + " where \"SERDE_ID\" in (" + serdeIds + ") and \"PARAM_KEY\" is not null" + + " order by \"SERDE_ID\" asc"; + loopJoinOrderedResult(pm, serdes, queryText, 0, new ApplyFunc() { + @Override + public void apply(SerDeInfo t, Object[] fields) { + t.putToParameters((String)fields[1], extractSqlClob(fields[2])); + }}); + // Perform conversion of null map values + for (SerDeInfo t : serdes.values()) { + t.setParameters(MetaStoreServerUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings)); + } + } + + /** + * Convert a boolean value returned from the RDBMS to a Java Boolean object. + * MySQL has booleans, but e.g. Derby uses 'Y'/'N' mapping. 
+ * + * @param value + * column value from the database + * @return The Boolean value of the database column value, null if the column + * value is null + * @throws MetaException + * if the column value cannot be converted into a Boolean object + */ + static Boolean extractSqlBoolean(Object value) throws MetaException { + if (value == null) { + return null; + } + if (value instanceof Boolean) { + return (Boolean)value; + } + if (value instanceof String) { + try { + return BooleanUtils.toBooleanObject((String) value, "Y", "N", null); + } catch (IllegalArgumentException iae) { + // NOOP + } + } + throw new MetaException("Cannot extract boolean from column value " + value); + } + + abstract static class ApplyFunc { + public ApplyFunc() { + } + + public abstract void apply(Target t, Object[] fields) throws MetaException; + } +} diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index d27224b23580b4662a85c874b657847ed068c9a3..5befe201da581f286bf39be1071873d6ad1d6aab 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -2028,8 +2028,11 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, return keys; } - private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException { + private SerDeInfo convertToSerDeInfo(MSerDeInfo ms, boolean allowNull) throws MetaException { if (ms == null) { + if (allowNull) { + return null; + } throw new MetaException("Invalid SerDeInfo object"); } SerDeInfo serde = @@ -2083,7 +2086,7 @@ private StorageDescriptor convertToStorageDescriptor( StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas), msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd .isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd - .getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd + .getSerDeInfo(), true), convertList(msd.getBucketCols()), convertToOrders(msd .getSortCols()), convertMap(msd.getParameters())); SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()), convertToSkewedValues(msd.getSkewedColValues()), @@ -2603,8 +2606,14 @@ private Partition convertToPart(MPartition mpart) throws MetaException { if (mpart == null) { return null; } - Partition p = new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase() - .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(), + //its possible that MPartition is partially filled, do null checks to avoid NPE + MTable table = mpart.getTable(); + String dbName = + table == null ? null : table.getDatabase() == null ? null : table.getDatabase().getName(); + String tableName = table == null ? null : table.getTableName(); + String catName = table == null ? null : + table.getDatabase() == null ? 
null : table.getDatabase().getCatalogName(); + Partition p = new Partition(convertList(mpart.getValues()), dbName, tableName, mpart.getCreateTime(), mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()), convertMap(mpart.getParameters())); p.setCatName(mpart.getTable().getDatabase().getCatalogName()); @@ -3479,7 +3488,6 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, return results; } - private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter) throws MetaException { Map params = new HashMap<>(); @@ -3607,17 +3615,23 @@ private void dropPartitionsNoTxn(String catName, String dbName, String tblName, private boolean doUseDirectSql; private long start; private Table table; + protected final List partitionFields; protected final String catName, dbName, tblName; private boolean success = false; protected T results = null; public GetHelper(String catalogName, String dbName, String tblName, - boolean allowSql, boolean allowJdo) - throws MetaException { + boolean allowSql, boolean allowJdo) throws MetaException { + this(catalogName, dbName, tblName, null, allowSql, allowJdo); + } + + public GetHelper(String catalogName, String dbName, String tblName, + List fields, boolean allowSql, boolean allowJdo) throws MetaException { assert allowSql || allowJdo; this.allowJdo = allowJdo; this.catName = (catalogName != null) ? normalizeIdentifier(catalogName) : null; this.dbName = (dbName != null) ? normalizeIdentifier(dbName) : null; + this.partitionFields = fields; if (tblName != null) { this.tblName = normalizeIdentifier(tblName); } else { @@ -3800,7 +3814,12 @@ public Table getTable() { private abstract class GetListHelper extends GetHelper> { public GetListHelper(String catName, String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(catName, dbName, tblName, allowSql, allowJdo); + super(catName, dbName, tblName, null, allowSql, allowJdo); + } + + public GetListHelper(String catName, String dbName, String tblName, List fields, + boolean allowSql, boolean allowJdo) throws MetaException { + super(catName, dbName, tblName, fields, allowSql, allowJdo); } @Override @@ -3948,6 +3967,42 @@ protected boolean canUseDirectSql(GetHelper> ctx) throws MetaExc }.run(true); } + @Override + public List getPartitionSpecsByFilterAndProjection(final Table table, final boolean allowSql, + final boolean allowJdo, final List fieldList, final String paramKeys, final boolean excludeParamKeys) + throws MetaException, NoSuchObjectException { + if (fieldList == null || fieldList.isEmpty()) { + // no fields are requested. 
Fallback to regular getPartitions implementation to return all the fields + return getPartitionsInternal(table.getCatName(), table.getDbName(), table.getTableName(), -1, + allowSql, allowJdo); + } + return new GetListHelper(table.getCatName(), table.getDbName(), table.getTableName(), + fieldList, allowSql, allowJdo) { + + @Override + protected List getSqlResult(GetHelper> ctx) throws MetaException { + return directSql + .getPartitionSpecsUsingProjection(ctx.getTable(), ctx.partitionFields, paramKeys, + excludeParamKeys); + } + + @Override + protected List getJdoResult( + GetHelper> ctx) throws MetaException { + //TODO for single-valued fields we can use setRange() to implement projection of fields but + //JDO doesn't support multi-valued fields in setRange() so currently JDO implementation + //fallbacks to full-partition fetch + QueryWrapper queryWrapper = new QueryWrapper(); + try { + return convertToParts(listMPartitions(catName, dbName, tblName, -1, queryWrapper)); + } finally { + queryWrapper.close(); + } + } + }.run(true); + } + + /** * Gets the table object for a given table, throws if anything goes wrong. * @param dbName Database name. @@ -11107,7 +11162,7 @@ public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, Me if (mSerDeInfo == null) { throw new NoSuchObjectException("No SerDe named " + serDeName); } - SerDeInfo serde = convertToSerDeInfo(mSerDeInfo); + SerDeInfo serde = convertToSerDeInfo(mSerDeInfo, false); committed = commitTransaction(); return serde; } finally { @@ -11225,7 +11280,7 @@ private SchemaVersion convertToSchemaVersion(MSchemaVersion mSchemaVersion) thro schemaVersion.setName(mSchemaVersion.getName()); } if (mSchemaVersion.getSerDe() != null) { - schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe())); + schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe(), false)); } return schemaVersion; } diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionProjectionEvaluator.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionProjectionEvaluator.java new file mode 100644 index 0000000000000000000000000000000000000000..7100cf4ba86798066bbd08c4038964f37a29a5c2 --- /dev/null +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionProjectionEvaluator.java @@ -0,0 +1,877 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SerdeType; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.thrift.TException; +import org.apache.thrift.TSerializer; +import org.apache.thrift.protocol.TSimpleJSONProtocol; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jdo.PersistenceManager; +import javax.jdo.Query; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.TreeMap; +import java.util.regex.Pattern; + +import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.extractSqlLong; + +public class PartitionProjectionEvaluator { + private static final Logger LOG = LoggerFactory.getLogger(PartitionProjectionEvaluator.class); + private final boolean convertMapNullsToEmptyStrings; + private final boolean isView; + private final String paramKeyPattern; + private final boolean excludeParamKeyPattern; + private Set projectionFields; + + interface PartitionFieldValueSetter { + void setValue(T part, PartitionFieldNode node, Object value) throws MetaException; + } + + private final ImmutableMap multiValuedFieldSetters = + new ImmutableMap.Builder() + .put("values", new PartitionValuesSetter()) + .put("parameters", new PartitionParametersSetter()) + .put("sd.cols", new PartitionSDColsSetter()) + .put("sd.bucketCols", new PartitionSDBucketColsSetter()) + .put("sd.sortCols", new PartitionSortColsSetter()) + .put("sd.parameters", new PartitionSDParametersSetter()) + .put("sd.serdeInfo.parameters", new PartitionSerdeInfoParametersSetter()) + .put("sd.skewedInfo.skewedColNames", new PartitionSkewedColsNamesSetter()) + .put("sd.skewedInfo.skewedColValues", new PartitionSkewedColsValuesSetter()) + .put("sd.skewedInfo.skewedColValueLocationMaps", + new PartitionSkewedColValLocationMapSetter()).build(); + + private static final String PART_ID = "PART_ID"; + private static final String SD_ID = "SD_ID"; + private static final String SERDE_ID = "SERDE_ID"; + private static final String CD_ID = "CD_ID"; + + private static final PartitionFieldNode partIdNode = new PartitionFieldNode(PART_ID); + private static final PartitionFieldNode sdIdNode = new PartitionFieldNode(SD_ID); + private static final PartitionFieldNode serdeIdNode = new PartitionFieldNode(SERDE_ID); + private static final PartitionFieldNode cdIdNode = new PartitionFieldNode(CD_ID); + + private final ImmutableMap fieldNameToTableName; + private final Set roots; + private 
final String PARTITIONS; + private final String SDS; + private final String SERDES; + private final String PARTITION_PARAMS; + private final PersistenceManager pm; + + @VisibleForTesting static final String SD_PATTERN = "sd|sd\\."; + @VisibleForTesting static final String SERDE_PATTERN = "sd\\.serdeInfo|sd\\.serdeInfo\\."; + @VisibleForTesting static final String CD_PATTERN = "sd\\.cols|sd\\.cols\\."; + + private static final int SD_INDEX = 0; + private static final int CD_INDEX = 1; + private static final int SERDE_INDEX = 2; + private static final int PART_INDEX = 3; + private final Set allPartitionFields; + + + public PartitionProjectionEvaluator(PersistenceManager pm, + ImmutableMap fieldNameToTableName, List projectionFields, + boolean convertMapNullsToEmptyStrings, boolean isView, String paramKeyPattern, + boolean excludeParamKeyPattern) throws MetaException { + this.pm = pm; + this.fieldNameToTableName = fieldNameToTableName; + this.convertMapNullsToEmptyStrings = convertMapNullsToEmptyStrings; + this.isView = isView; + this.paramKeyPattern = paramKeyPattern; + this.excludeParamKeyPattern = excludeParamKeyPattern; + this.PARTITIONS = + fieldNameToTableName.containsKey("PARTITIONS_TABLE_NAME") ? fieldNameToTableName + .get("PARTITIONS_TABLE_NAME") : "PARTITIONS"; + this.SDS = fieldNameToTableName.containsKey("SDS_TABLE_NAME") ? fieldNameToTableName + .get("SDS_TABLE_NAME") : "SDS"; + this.SERDES = fieldNameToTableName.containsKey("SERDES_TABLE_NAME") ? fieldNameToTableName + .get("SERDES_TABLE_NAME") : "SERDES"; + this.PARTITION_PARAMS = + fieldNameToTableName.containsKey("PARTITION_PARAMS") ? fieldNameToTableName + .get("PARTITION_PARAMS") : "PARTITION_PARAMS"; + try { + allPartitionFields = getPartitionClassFields(); + } catch (TException | IOException e) { + LOG.debug("Exception received while parsing partition fields", e); + throw new MetaException("Cannot load all partition fields" + e.getMessage()); + } + roots = parse(projectionFields); + + // we always query PART_ID + roots.add(partIdNode); + if (find(SD_PATTERN)) { + roots.add(sdIdNode); + } + if (find(SERDE_PATTERN)) { + roots.add(serdeIdNode); + } + if (find(CD_PATTERN)) { + roots.add(cdIdNode); + } + } + + /** + * Given a Java regex string pattern, checks if the the partitionFieldNode tree + * has any node whose fieldName matches the given pattern + * @param searchField + * @return + */ + @VisibleForTesting + boolean find(String searchField) { + Pattern p = Pattern.compile(searchField); + for (PartitionFieldNode node : roots) { + if (findUtil(node, p)) { + return true; + } + } + return false; + } + + private boolean findUtil(PartitionFieldNode root, Pattern p) { + if (root == null) { + return false; + } + if (p.matcher(root.fieldName).matches()) { + return true; + } + for (PartitionFieldNode child : root.children) { + if (findUtil(child, p)) { + return true; + } + } + return false; + } + + /** + * if top level field name is given expand the top level field such that all the children + * of that node are added to the projection list. eg. 
if only "sd" is provided in the projection + * list, it means all the nested fields for sd should be added to the projection fields + * @param projectionList + * @return + */ + private Set expand(Collection projectionList) throws MetaException { + Set result = new HashSet<>(); + for (String projectedField : projectionList) { + if (allPartitionFields.contains(projectedField)) { + result.add(projectedField); + } else { + boolean found = false; + for (String partitionField : allPartitionFields) { + if (partitionField.startsWith(projectedField)) { + LOG.debug("Found " + partitionField + " included within given projection field " + + projectedField); + result.add(partitionField); + found = true; + } + } + if (!found) { + throw new MetaException("Invalid field name " + projectedField); + } + } + } + return result; + } + + @VisibleForTesting + Set getRoots() { + return roots; + } + + private void validate(Collection projectionFields) throws MetaException { + Set verify = new HashSet<>(projectionFields); + verify.removeAll(allPartitionFields); + if (verify.size() > 0) { + throw new MetaException("Invalid partition fields in the projection spec" + Arrays + .toString(verify.toArray(new String[verify.size()]))); + } + } + + private Set parse(List inputProjectionFields) throws MetaException { + // in case of dbName and tableName we rely on table object to get their values + this.projectionFields = new HashSet<>(inputProjectionFields); + projectionFields.remove("dbName"); + projectionFields.remove("tableName"); + projectionFields.remove("catName"); + if (isView) { + // if this is a view SDs are not set so can be skipped + projectionFields.removeIf( + s -> s.matches(SD_PATTERN) || s.matches(SERDE_PATTERN) || s.matches(CD_PATTERN)); + } + // remove redundant fields + projectionFields = expand(projectionFields); + removeUnsupportedFields(); + validate(projectionFields); + + Map nestedNodes = new HashMap<>(); + Set rootNodes = new HashSet<>(); + + for (String projectedField : projectionFields) { + String[] fields = projectedField.split("\\."); + if (fields.length == 0) { + LOG.warn("Invalid projected field {}. Ignoring ..", projectedField); + continue; + } + StringBuilder fieldNameBuilder = new StringBuilder(fields[0]); + PartitionFieldNode currentNode = createIfNotExists(nestedNodes, fieldNameBuilder.toString()); + rootNodes.add(currentNode); + for (int level = 1; level < fields.length; level++) { + final String name = fieldNameBuilder.append(".").append(fields[level]).toString(); + PartitionFieldNode childNode = createIfNotExists(nestedNodes, name); + // all the children of a multi-valued nodes are also multi-valued + if (currentNode.isMultiValued) { + childNode.setMultiValued(); + } + currentNode.addChild(childNode); + currentNode = childNode; + } + } + return rootNodes; + } + + // TODO some of the optional partition fields are never set by DirectSQL implementation + // Removing such fields to keep it consistent with methods in MetastoreDirectSQL class + private void removeUnsupportedFields() { + List unsupportedFields = Arrays + .asList("sd.serdeInfo.serializerClass", "sd.serdeInfo.deserializerClass", + "sd.serdeInfo.serdeType", "sd.serdeInfo.description"); + for (String unsupportedField : unsupportedFields) { + if (projectionFields.contains(unsupportedField)) { + LOG.warn("DirectSQL does not return partitions with the optional field" + unsupportedField + + " set. 
Removing it from the projection list"); + projectionFields.remove(unsupportedField); + } + } + } + + private PartitionFieldNode createIfNotExists(Map nestedNodes, + String fieldName) { + PartitionFieldNode currentNode = nestedNodes.computeIfAbsent(fieldName, k -> { + if (multiValuedFieldSetters.containsKey(fieldName)) { + return new PartitionFieldNode(fieldName, true); + } else { + return new PartitionFieldNode(fieldName); + } + }); + return currentNode; + } + + public List getPartitionsUsingProjectionList(Table table, List partitionIds) + throws MetaException { + TreeMap sds = new TreeMap<>(); + TreeMap> cds = new TreeMap<>(); + TreeMap serdes = new TreeMap<>(); + TreeMap partitions = new TreeMap<>(); + List results = setSingleValuedFields(partitionIds, partitions, sds, serdes, cds); + setMultivaluedFields(partitions, sds, serdes, cds); + return results; + } + + private List setSingleValuedFields(List partitionIds, + final TreeMap partitions, final TreeMap sdIds, + final TreeMap serdeIds, final TreeMap> cdIds) + throws MetaException { + + StringBuilder queryTextBuilder = new StringBuilder(); + int numColumns = buildQueryForSingleValuedFields(partitionIds, queryTextBuilder); + String queryText = queryTextBuilder.toString(); + + try (Query query = pm.newQuery("javax.jdo.query.SQL", queryText)) { + + long start = LOG.isDebugEnabled() ? System.nanoTime() : 0; + List sqlResult = MetastoreDirectSqlUtils.executeWithArray(query, null, queryText); + long queryTime = LOG.isDebugEnabled() ? System.nanoTime() : 0; + MetastoreDirectSqlUtils.timingTrace(LOG.isDebugEnabled(), queryText, start, queryTime); + Deadline.checkTimeout(); + final Long[] ids = new Long[4]; + Object[] rowVals = new Object[1]; + // Keep order by name, consistent with JDO. + ArrayList orderedResult = new ArrayList(partitionIds.size()); + for (Object row : sqlResult) { + if (numColumns > 1) { + rowVals = (Object[])row; + } else { + // only one column is selected by query. 
The result class will be Object + rowVals[0] = row; + } + Partition part = new Partition(); + for (PartitionFieldNode root : roots) { + traverseAndSetValues(part, root, rowVals, new PartitionFieldValueSetter() { + @Override + public void setValue(Object partition, PartitionFieldNode node, Object value) + throws MetaException { + if (!node.isMultiValued) { + // in case of serdeid and sdId node we just collect the sdIds for further processing + if (node.equals(sdIdNode)) { + ids[SD_INDEX] = extractSqlLong(value); + } else if (node.equals(serdeIdNode)) { + ids[SERDE_INDEX] = extractSqlLong(value); + } else if (node.equals(cdIdNode)) { + ids[CD_INDEX] = extractSqlLong(value); + } else if (node.equals(partIdNode)) { + ids[PART_INDEX] = extractSqlLong(value); + } else { + // incase of sd.compressed and sd.storedAsSubDirectories we need special code to convert + // string to a boolean value + if (node.fieldName.equals("sd.compressed") || node.fieldName.equals("sd.storedAsSubDirectories")) { + value = MetastoreDirectSqlUtils.extractSqlBoolean(value); + } + MetaStoreUtils.setNestedProperty(partition, node.fieldName, value, true); + } + } + } + }); + } + // PART_ID is always queried + if (ids[PART_INDEX] == null) { + throw new MetaException("Could not find PART_ID for partition " + part); + } + partitions.put(ids[PART_INDEX], part); + orderedResult.add(part); + ids[PART_INDEX] = null; + + if (ids[SD_INDEX] != null) { + // sd object is initialized if any of the sd single-valued fields are in the projection + if (part.getSd() == null) { + part.setSd(new StorageDescriptor()); + } + sdIds.put(ids[SD_INDEX].longValue(), part.getSd()); + ids[SD_INDEX] = null; + } + + if (ids[SERDE_INDEX] != null) { + // serde object must have already been intialized above in MetaStoreUtils.setNestedProperty call + if (part.getSd().getSerdeInfo() == null) { + part.getSd().setSerdeInfo(new SerDeInfo()); + } + serdeIds.put(ids[SERDE_INDEX].longValue(), part.getSd().getSerdeInfo()); + ids[SERDE_INDEX] = null; + } + + if (ids[CD_INDEX] != null) { + // common case is all the SDs will reuse the same CD + // allocate List only when you see a new CD_ID + cdIds.putIfAbsent(ids[CD_INDEX], new ArrayList<>(5)); + if (part.getSd().getCols() == null) { + part.getSd().setCols(cdIds.get(ids[CD_INDEX])); + } + ids[CD_INDEX] = null; + } + Deadline.checkTimeout(); + } + return orderedResult; + } catch (Exception e) { + LOG.error("Exception received while getting partitions using projected fields", e); + throw new MetaException(e.getMessage()); + } + } + + private boolean hasSingledValuedLeaf(PartitionFieldNode root) { + if (root == null) return false; + if (root.isLeafNode()) return !root.isMultiValued(); + + for (PartitionFieldNode child : root.getChildren()) { + if (hasSingledValuedLeaf(child)) { + return true; + } + } + return false; + } + + private void setMultivaluedFields(TreeMap partitions, + TreeMap sds, TreeMap serdes, + TreeMap> cds) throws MetaException { + for (PartitionFieldNode root : roots) { + traverseAndSetMultiValuedFields(root, partitions, sds, serdes, cds); + } + } + + private void traverseAndSetMultiValuedFields(PartitionFieldNode root, + TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) throws MetaException { + if (root == null) { + return; + } + // if a multi-valued node is found set its value using its value-setters + // note that once a multi-valued node is found the method does not recurse further + // this is because the multi-valued setter also sets the values of all its descendents + if 
(root.isMultiValued) { + MutivaluedFieldSetter multiValuedFieldSetter = multiValuedFieldSetters.get(root.fieldName); + if (multiValuedFieldSetter == null) { + throw new MetaException( + "Cannot find multi-valued field setter for field " + root.fieldName); + } + multiValuedFieldSetter.setValue(root, partitions, sds, serdes, cds); + } else { + for (PartitionFieldNode child : root.children) { + traverseAndSetMultiValuedFields(child, partitions, sds, serdes, cds); + } + } + } + + private void traverseAndSetValues(Partition part, PartitionFieldNode root, Object[] row, + PartitionFieldValueSetter valueSetter) throws MetaException { + // if root is null or is multiValued, do not recurse further + // multi-valued fields are set separately in setMultiValuedFields method + if (root == null || root.isMultiValued()) { + return; + } + if (root.isLeafNode()) { + valueSetter.setValue(part, root, row[root.fieldIndex]); + return; + } + for (PartitionFieldNode child : root.children) { + traverseAndSetValues(part, child, row, valueSetter); + } + } + + private static final String SPACE = " "; + + private int buildQueryForSingleValuedFields(List partitionIds, StringBuilder queryTextBuilder) { + queryTextBuilder.append("select "); + // build projection columns using the ProjectedFields + // it should not matter if you select all the + List columnList = getSingleValuedColumnNames(roots); + queryTextBuilder.append(Joiner.on(',').join(columnList)); + queryTextBuilder.append(SPACE); + queryTextBuilder.append("from " + PARTITIONS); + // if SD fields are selected add join clause with SDS + boolean foundSD = false; + if (find(SD_PATTERN)) { + queryTextBuilder.append(SPACE); + queryTextBuilder.append( + "left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + ".\"SD_ID\""); + foundSD = true; + } + // if serde fields are selected add join clause on serdes + if (foundSD || find(SERDE_PATTERN)) { + queryTextBuilder.append(SPACE); + queryTextBuilder.append( + " left outer join " + SERDES + " on " + SDS + ".\"SERDE_ID\" = " + SERDES + + ".\"SERDE_ID\""); + } + queryTextBuilder.append(SPACE); + //add where clause + queryTextBuilder.append("where \"PART_ID\" in (" + Joiner.on(',').join(partitionIds) + + ") order by \"PART_NAME\" asc"); + return columnList.size(); + } + + private int getSingleValuedColumnName(PartitionFieldNode root, int fieldId, + final List columnNames) { + if (root == null) { + return fieldId; + } + if (root.isLeafNode() && !root.isMultiValued) { + if (fieldNameToTableName.containsKey(root.fieldName)) { + columnNames.add(fieldNameToTableName.get(root.fieldName)); + root.setFieldIndex(fieldId++); + return fieldId; + } + throw new RuntimeException( + "No column name mapping found for partition field " + root.fieldName); + } + for (PartitionFieldNode child : root.children) { + fieldId = getSingleValuedColumnName(child, fieldId, columnNames); + } + return fieldId; + } + + private List getSingleValuedColumnNames(Set roots) { + List columnNames = new ArrayList<>(); + int fieldIndex = 0; + for (PartitionFieldNode node : roots) { + fieldIndex = getSingleValuedColumnName(node, fieldIndex, columnNames); + } + return columnNames; + } + + static Set getPartitionClassFields() throws TException, IOException { + Partition dummy = new Partition(); + //level 1 fields + dummy.setValues(Collections.EMPTY_LIST); + dummy.setDbName("db"); + dummy.setTableName("tbl"); + dummy.setParameters(Collections.EMPTY_MAP); + dummy.setCatName("dummyCatname"); + + StorageDescriptor sd = new StorageDescriptor(); + FieldSchema 
fs = new FieldSchema("", "", ""); + List cols = new ArrayList<>(1); + cols.add(fs); + sd.setCols(cols); + sd.setLocation(""); + sd.setInputFormat(""); + sd.setOutputFormat(""); + dummy.setSd(sd); + + //serdeInfo + SerDeInfo serDeInfo = new SerDeInfo(); + serDeInfo.setParameters(Collections.EMPTY_MAP); + serDeInfo.setName(""); + serDeInfo.setSerializationLib(""); + serDeInfo.setParameters(Collections.EMPTY_MAP); + serDeInfo.setDescription(""); + serDeInfo.setSerializerClass(""); + serDeInfo.setDeserializerClass(""); + serDeInfo.setSerdeType(SerdeType.HIVE); + sd.setSerdeInfo(serDeInfo); + + sd.setBucketCols(Collections.EMPTY_LIST); + List sortCols = new ArrayList<>(0); + sortCols.add(new Order("dummy", 1)); + sd.setSortCols(sortCols); + sd.setParameters(new HashMap<>(0)); + SkewedInfo skewedInfo = new SkewedInfo(); + skewedInfo.setSkewedColValueLocationMaps(Collections.EMPTY_MAP); + skewedInfo.setSkewedColNames(Collections.EMPTY_LIST); + skewedInfo.setSkewedColValues(Collections.EMPTY_LIST); + sd.setSkewedInfo(skewedInfo); + sd.setStoredAsSubDirectories(true); + + serDeInfo.setParameters(Collections.EMPTY_MAP); + + Set results= new HashSet<>(); + TSerializer serializer = new TSerializer(new TSimpleJSONProtocol.Factory()); + String json = serializer.toString(dummy); + ObjectMapper objectMapper = new ObjectMapper(); + JsonNode jsonNode = objectMapper.readTree(json); + getNestedFieldName(jsonNode, "", results); + return results; + } + + private static void getNestedFieldName(JsonNode jsonNode, String fieldName, + Collection results) { + if (jsonNode instanceof ArrayNode) { + Iterator elements = ((ArrayNode) jsonNode).elements(); + if (!elements.hasNext()) { + results.add(fieldName); + return; + } + while (elements.hasNext()) { + JsonNode element = elements.next(); + getNestedFieldName(element, fieldName, results); + } + } else { + Iterator> fields = jsonNode.fields(); + if (!fields.hasNext()) { + results.add(fieldName); + return; + } + while (fields.hasNext()) { + Entry fieldKV = fields.next(); + String key = fieldKV.getKey(); + getNestedFieldName(fieldKV.getValue(), fieldName.length() == 0 ? key : fieldName + "." 
+ key, + results); + } + } + } + + static class PartitionFieldNode { + private String fieldName; + private Set children = new HashSet<>(4); + private boolean isMultiValued; + private int fieldIndex; + + PartitionFieldNode(String fieldName) { + this.fieldName = fieldName; + isMultiValued = false; + } + + PartitionFieldNode(String fieldName, boolean isMultiValued) { + this.fieldName = fieldName; + this.isMultiValued = isMultiValued; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + PartitionFieldNode that = (PartitionFieldNode) o; + return Objects.equals(fieldName, that.fieldName); + } + + boolean isLeafNode() { + return children == null || children.isEmpty(); + } + + void setFieldIndex(int fieldIndex) { + this.fieldIndex = fieldIndex; + } + + @VisibleForTesting + void addChild(PartitionFieldNode child) { + children.add(child); + } + + @VisibleForTesting + String getFieldName() { + return fieldName; + } + + @VisibleForTesting + Set getChildren() { + return new HashSet<>(children); + } + + @VisibleForTesting + boolean isMultiValued() { + return isMultiValued; + } + + @Override + public String toString() { + return fieldName; + } + + @Override + public int hashCode() { + return Objects.hash(fieldName); + } + + void setMultiValued() { + this.isMultiValued = true; + } + } + + private interface MutivaluedFieldSetter { + void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) throws MetaException; + } + + private class PartitionValuesSetter implements MutivaluedFieldSetter { + private PartitionValuesSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String tableName = + fieldNameToTableName.containsKey("PARTITION_KEY_VALS") ? fieldNameToTableName + .get("PARTITION_KEY_VALS") : "PARTITION_KEY_VALS"; + MetastoreDirectSqlUtils + .setPartitionValues(tableName, pm, Joiner.on(',').join(partitions.keySet()), partitions); + } + } + + private class PartitionParametersSetter implements MutivaluedFieldSetter { + private PartitionParametersSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + MetastoreDirectSqlUtils + .setPartitionParametersWithFilter(PARTITION_PARAMS, convertMapNullsToEmptyStrings, pm, + Joiner.on(',').join(partitions.keySet()), partitions, paramKeyPattern, + excludeParamKeyPattern); + } + } + + private class PartitionSDColsSetter implements MutivaluedFieldSetter { + private PartitionSDColsSetter() { + // prevent instantiation + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + // find the fields which are requested for sd.cols + // children field names would be sd.cols.name, sd.cols.type or sd.cols.description + List childFields = getChildrenFieldNames(root); + final String tableName = fieldNameToTableName.containsKey("COLUMNS_V2") ? 
fieldNameToTableName + .get("COLUMNS_V2") : "COLUMNS_V2"; + MetastoreDirectSqlUtils + .setSDCols(tableName, childFields, pm, cds, Joiner.on(',').join(cds.keySet())); + } + } + + private class PartitionSDBucketColsSetter implements MutivaluedFieldSetter { + private PartitionSDBucketColsSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String tableName = + fieldNameToTableName.containsKey("BUCKETING_COLS") ? fieldNameToTableName + .get("BUCKETING_COLS") : "BUCKETING_COLS"; + MetastoreDirectSqlUtils + .setSDBucketCols(tableName, pm, sds, Joiner.on(',').join(sds.keySet())); + } + } + + private class PartitionSortColsSetter implements MutivaluedFieldSetter { + private PartitionSortColsSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + List childFieldNames = getChildrenFieldNames(root); + final String tableName = fieldNameToTableName.containsKey("SORT_COLS") ? fieldNameToTableName + .get("SORT_COLS") : "SORT_COLS"; + MetastoreDirectSqlUtils + .setSDSortCols(tableName, childFieldNames, pm, sds, Joiner.on(',').join(sds.keySet())); + } + } + + private List getChildrenFieldNames(PartitionFieldNode root) throws MetaException { + List childFields = new ArrayList<>(3); + for (PartitionFieldNode child : root.getChildren()) { + if (child.getFieldName().lastIndexOf(".") < 0) { + throw new MetaException("Error parsing multi-valued field name " + child.getFieldName()); + } + childFields.add(child.getFieldName().substring(child.getFieldName().lastIndexOf(".") + 1)); + } + return childFields; + } + + private class PartitionSDParametersSetter implements MutivaluedFieldSetter { + private PartitionSDParametersSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String tableName = fieldNameToTableName.containsKey("SD_PARAMS") ? fieldNameToTableName + .get("SD_PARAMS") : "SD_PARAMS"; + MetastoreDirectSqlUtils.setSDParameters(tableName, convertMapNullsToEmptyStrings, pm, sds, + Joiner.on(',').join(sds.keySet())); + } + } + + private class PartitionSerdeInfoParametersSetter implements MutivaluedFieldSetter { + private PartitionSerdeInfoParametersSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String tableName = + fieldNameToTableName.containsKey("SERDE_PARAMS") ? fieldNameToTableName + .get("SERDE_PARAMS") : "SERDE_PARAMS"; + MetastoreDirectSqlUtils.setSerdeParams(tableName, convertMapNullsToEmptyStrings, pm, serdes, + Joiner.on(',').join(serdes.keySet())); + } + } + + private class PartitionSkewedColsNamesSetter implements MutivaluedFieldSetter { + private PartitionSkewedColsNamesSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String tableName = + fieldNameToTableName.containsKey("SKEWED_COL_NAMES") ? 
fieldNameToTableName + .get("SKEWED_COL_NAMES") : "SKEWED_COL_NAMES"; + MetastoreDirectSqlUtils + .setSkewedColNames(tableName, pm, sds, Joiner.on(',').join(sds.keySet())); + } + } + + private class PartitionSkewedColsValuesSetter implements MutivaluedFieldSetter { + private PartitionSkewedColsValuesSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String skewedStringListVals = + fieldNameToTableName.containsKey("SKEWED_STRING_LIST_VALUES") ? fieldNameToTableName + .get("SKEWED_STRING_LIST_VALUES") : "SKEWED_STRING_LIST_VALUES"; + final String skewedVals = + fieldNameToTableName.containsKey("SKEWED_VALUES") ? fieldNameToTableName + .get("SKEWED_VALUES") : "SKEWED_VALUES"; + MetastoreDirectSqlUtils.setSkewedColValues(skewedStringListVals, skewedVals, pm, sds, + Joiner.on(',').join(sds.keySet())); + } + } + + private class PartitionSkewedColValLocationMapSetter implements MutivaluedFieldSetter { + private PartitionSkewedColValLocationMapSetter() { + // + } + + @Override + public void setValue(PartitionFieldNode root, TreeMap partitions, TreeMap sds, + TreeMap serdes, TreeMap> cds) + throws MetaException { + final String skewedStringListVals = + fieldNameToTableName.containsKey("SKEWED_STRING_LIST_VALUES") ? fieldNameToTableName + .get("SKEWED_STRING_LIST_VALUES") : "SKEWED_STRING_LIST_VALUES"; + final String skewedColValLocMap = + fieldNameToTableName.containsKey("SKEWED_COL_VALUE_LOC_MAP") ? fieldNameToTableName + .get("SKEWED_COL_VALUE_LOC_MAP") : "SKEWED_COL_VALUE_LOC_MAP"; + MetastoreDirectSqlUtils + .setSkewedColLocationMaps(skewedColValLocMap, skewedStringListVals, pm, sds, + Joiner.on(',').join(sds.keySet())); + } + } +} diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index b61ee81533930c889f23d2551041055cbdd1a6b2..cb80809002d4ba426bfe6e7e4d015683b73fb65a 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -20,6 +20,12 @@ import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.ISchemaName; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -545,6 +551,9 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List getPartitionSpecsByFilterAndProjection(Table table, final boolean allowSql, final boolean allowJdo, + final List fieldList, final String paramKeys, boolean excludeFlag) + throws MetaException, NoSuchObjectException; /** * Get partitions using an already parsed expression. * @param catName catalog name. 
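[Editorial note, not part of the patch] The RawStore addition above only declares the contract; as the ObjectStore change earlier in this patch shows, a null or empty field list falls back to the full getPartitions path, and the JDO branch still fetches whole partitions. A minimal caller-side sketch, assuming the method returns a List<Partition> with only the projected fields populated (the class name and field choices below are illustrative):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class ProjectionCallSketch {
  // Fetch partitions with only values, sd.location and parameters populated.
  // A null or empty field list would instead take the full getPartitions path.
  static List<Partition> fetchProjected(RawStore store, Table table)
      throws MetaException, NoSuchObjectException {
    List<String> fields = Arrays.asList("values", "sd.location", "parameters");
    // paramKeys = null, excludeFlag = false: no partition-parameter key filtering.
    return store.getPartitionSpecsByFilterAndProjection(table, true, true, fields, null, false);
  }
}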
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 7a0b21b2580d8bb9b256dbc698f125ed15ccdcd3..15df30520d1015514a45d0bc91dbf326be740568 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.metastore; import java.math.BigDecimal; -import java.math.BigInteger; import java.nio.ByteBuffer; import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; @@ -501,73 +500,73 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData colType = colType.toLowerCase(); if (colType.equals("boolean")) { BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); - boolStats.setNumFalses(MetaStoreDirectSql.extractSqlLong(falses)); - boolStats.setNumTrues(MetaStoreDirectSql.extractSqlLong(trues)); - boolStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + boolStats.setNumFalses(MetastoreDirectSqlUtils.extractSqlLong(falses)); + boolStats.setNumTrues(MetastoreDirectSqlUtils.extractSqlLong(trues)); + boolStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); data.setBooleanStats(boolStats); } else if (colType.equals("string") || colType.startsWith("varchar") || colType.startsWith("char")) { StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector(); - stringStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + stringStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); stringStats.setAvgColLen(MetaStoreDirectSql.extractSqlDouble(avglen)); - stringStats.setMaxColLen(MetaStoreDirectSql.extractSqlLong(maxlen)); - stringStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); + stringStats.setMaxColLen(MetastoreDirectSqlUtils.extractSqlLong(maxlen)); + stringStats.setNumDVs(MetastoreDirectSqlUtils.extractSqlLong(dist)); stringStats.setBitVectors(MetaStoreDirectSql.extractSqlBlob(bitVector)); data.setStringStats(stringStats); } else if (colType.equals("binary")) { BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - binaryStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + binaryStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); binaryStats.setAvgColLen(MetaStoreDirectSql.extractSqlDouble(avglen)); - binaryStats.setMaxColLen(MetaStoreDirectSql.extractSqlLong(maxlen)); + binaryStats.setMaxColLen(MetastoreDirectSqlUtils.extractSqlLong(maxlen)); data.setBinaryStats(binaryStats); } else if (colType.equals("bigint") || colType.equals("int") || colType.equals("smallint") || colType.equals("tinyint") || colType.equals("timestamp")) { LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector(); - longStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + longStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (lhigh != null) { - longStats.setHighValue(MetaStoreDirectSql.extractSqlLong(lhigh)); + longStats.setHighValue(MetastoreDirectSqlUtils.extractSqlLong(lhigh)); } if (llow != null) { - longStats.setLowValue(MetaStoreDirectSql.extractSqlLong(llow)); + longStats.setLowValue(MetastoreDirectSqlUtils.extractSqlLong(llow)); } - longStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); + 
longStats.setNumDVs(MetastoreDirectSqlUtils.extractSqlLong(dist)); longStats.setBitVectors(MetaStoreDirectSql.extractSqlBlob(bitVector)); data.setLongStats(longStats); } else if (colType.equals("double") || colType.equals("float")) { DoubleColumnStatsDataInspector doubleStats = new DoubleColumnStatsDataInspector(); - doubleStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + doubleStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (dhigh != null) { doubleStats.setHighValue(MetaStoreDirectSql.extractSqlDouble(dhigh)); } if (dlow != null) { doubleStats.setLowValue(MetaStoreDirectSql.extractSqlDouble(dlow)); } - doubleStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); + doubleStats.setNumDVs(MetastoreDirectSqlUtils.extractSqlLong(dist)); doubleStats.setBitVectors(MetaStoreDirectSql.extractSqlBlob(bitVector)); data.setDoubleStats(doubleStats); } else if (colType.startsWith("decimal")) { DecimalColumnStatsDataInspector decimalStats = new DecimalColumnStatsDataInspector(); - decimalStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + decimalStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (dechigh != null) { decimalStats.setHighValue(DecimalUtils.createThriftDecimal((String)dechigh)); } if (declow != null) { decimalStats.setLowValue(DecimalUtils.createThriftDecimal((String)declow)); } - decimalStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); + decimalStats.setNumDVs(MetastoreDirectSqlUtils.extractSqlLong(dist)); decimalStats.setBitVectors(MetaStoreDirectSql.extractSqlBlob(bitVector)); data.setDecimalStats(decimalStats); } else if (colType.equals("date")) { DateColumnStatsDataInspector dateStats = new DateColumnStatsDataInspector(); - dateStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + dateStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (lhigh != null) { - dateStats.setHighValue(new Date(MetaStoreDirectSql.extractSqlLong(lhigh))); + dateStats.setHighValue(new Date(MetastoreDirectSqlUtils.extractSqlLong(lhigh))); } if (llow != null) { - dateStats.setLowValue(new Date(MetaStoreDirectSql.extractSqlLong(llow))); + dateStats.setLowValue(new Date(MetastoreDirectSqlUtils.extractSqlLong(llow))); } - dateStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); + dateStats.setNumDVs(MetastoreDirectSqlUtils.extractSqlLong(dist)); dateStats.setBitVectors(MetaStoreDirectSql.extractSqlBlob(bitVector)); data.setDateStats(dateStats); } @@ -582,48 +581,48 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData colType = colType.toLowerCase(); if (colType.equals("boolean")) { BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); - boolStats.setNumFalses(MetaStoreDirectSql.extractSqlLong(falses)); - boolStats.setNumTrues(MetaStoreDirectSql.extractSqlLong(trues)); - boolStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + boolStats.setNumFalses(MetastoreDirectSqlUtils.extractSqlLong(falses)); + boolStats.setNumTrues(MetastoreDirectSqlUtils.extractSqlLong(trues)); + boolStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); data.setBooleanStats(boolStats); } else if (colType.equals("string") || colType.startsWith("varchar") || colType.startsWith("char")) { StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector(); - stringStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + stringStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); stringStats.setAvgColLen(MetaStoreDirectSql.extractSqlDouble(avglen)); - 
stringStats.setMaxColLen(MetaStoreDirectSql.extractSqlLong(maxlen)); - stringStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); + stringStats.setMaxColLen(MetastoreDirectSqlUtils.extractSqlLong(maxlen)); + stringStats.setNumDVs(MetastoreDirectSqlUtils.extractSqlLong(dist)); data.setStringStats(stringStats); } else if (colType.equals("binary")) { BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - binaryStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + binaryStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); binaryStats.setAvgColLen(MetaStoreDirectSql.extractSqlDouble(avglen)); - binaryStats.setMaxColLen(MetaStoreDirectSql.extractSqlLong(maxlen)); + binaryStats.setMaxColLen(MetastoreDirectSqlUtils.extractSqlLong(maxlen)); data.setBinaryStats(binaryStats); } else if (colType.equals("bigint") || colType.equals("int") || colType.equals("smallint") || colType.equals("tinyint") || colType.equals("timestamp")) { LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector(); - longStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + longStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (lhigh != null) { - longStats.setHighValue(MetaStoreDirectSql.extractSqlLong(lhigh)); + longStats.setHighValue(MetastoreDirectSqlUtils.extractSqlLong(lhigh)); } if (llow != null) { - longStats.setLowValue(MetaStoreDirectSql.extractSqlLong(llow)); + longStats.setLowValue(MetastoreDirectSqlUtils.extractSqlLong(llow)); } - long lowerBound = MetaStoreDirectSql.extractSqlLong(dist); - long higherBound = MetaStoreDirectSql.extractSqlLong(sumDist); + long lowerBound = MetastoreDirectSqlUtils.extractSqlLong(dist); + long higherBound = MetastoreDirectSqlUtils.extractSqlLong(sumDist); long rangeBound = Long.MAX_VALUE; if (lhigh != null && llow != null) { - rangeBound = MetaStoreDirectSql.extractSqlLong(lhigh) - - MetaStoreDirectSql.extractSqlLong(llow) + 1; + rangeBound = MetastoreDirectSqlUtils.extractSqlLong(lhigh) + - MetastoreDirectSqlUtils.extractSqlLong(llow) + 1; } long estimation; if (useDensityFunctionForNDVEstimation && lhigh != null && llow != null && avgLong != null && MetaStoreDirectSql.extractSqlDouble(avgLong) != 0.0) { // We have estimation, lowerbound and higherbound. We use estimation if // it is between lowerbound and higherbound. 
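[Editorial note, not part of the patch] The comment above describes a clamp: the density-based estimate (value range divided by the average value) is only used when it falls inside the bounds derived from the per-partition NDVs. A standalone arithmetic sketch of that idea, with made-up numbers and without the additional rangeBound handling in the surrounding code:

public class NdvClampSketch {
  // Range-based NDV estimate clamped into [lowerBound, higherBound].
  static long estimateNdv(long high, long low, double avg, long lowerBound, long higherBound) {
    long estimation = (long) ((high - low) / avg);
    if (estimation < lowerBound) {
      return lowerBound;
    }
    if (estimation > higherBound) {
      return higherBound;
    }
    return estimation;
  }

  public static void main(String[] args) {
    // Values span [0, 10000] with an average of 2.5 -> raw estimate 4000,
    // which exceeds the upper bound 3000, so 3000 is used.
    System.out.println(estimateNdv(10_000L, 0L, 2.5, 500L, 3_000L));
  }
}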
- estimation = MetaStoreDirectSql - .extractSqlLong((MetaStoreDirectSql.extractSqlLong(lhigh) - MetaStoreDirectSql + estimation = MetastoreDirectSqlUtils + .extractSqlLong((MetastoreDirectSqlUtils.extractSqlLong(lhigh) - MetastoreDirectSqlUtils .extractSqlLong(llow)) / MetaStoreDirectSql.extractSqlDouble(avgLong)); if (estimation < lowerBound) { estimation = lowerBound; @@ -638,27 +637,27 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData data.setLongStats(longStats); } else if (colType.equals("date")) { DateColumnStatsDataInspector dateStats = new DateColumnStatsDataInspector(); - dateStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + dateStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (lhigh != null) { - dateStats.setHighValue(new Date(MetaStoreDirectSql.extractSqlLong(lhigh))); + dateStats.setHighValue(new Date(MetastoreDirectSqlUtils.extractSqlLong(lhigh))); } if (llow != null) { - dateStats.setLowValue(new Date(MetaStoreDirectSql.extractSqlLong(llow))); + dateStats.setLowValue(new Date(MetastoreDirectSqlUtils.extractSqlLong(llow))); } - long lowerBound = MetaStoreDirectSql.extractSqlLong(dist); - long higherBound = MetaStoreDirectSql.extractSqlLong(sumDist); + long lowerBound = MetastoreDirectSqlUtils.extractSqlLong(dist); + long higherBound = MetastoreDirectSqlUtils.extractSqlLong(sumDist); long rangeBound = Long.MAX_VALUE; if (lhigh != null && llow != null) { - rangeBound = MetaStoreDirectSql.extractSqlLong(lhigh) - - MetaStoreDirectSql.extractSqlLong(llow) + 1; + rangeBound = MetastoreDirectSqlUtils.extractSqlLong(lhigh) + - MetastoreDirectSqlUtils.extractSqlLong(llow) + 1; } long estimation; if (useDensityFunctionForNDVEstimation && lhigh != null && llow != null && avgLong != null && MetaStoreDirectSql.extractSqlDouble(avgLong) != 0.0) { // We have estimation, lowerbound and higherbound. We use estimation if // it is between lowerbound and higherbound. 
- estimation = MetaStoreDirectSql - .extractSqlLong((MetaStoreDirectSql.extractSqlLong(lhigh) - MetaStoreDirectSql + estimation = MetastoreDirectSqlUtils + .extractSqlLong((MetastoreDirectSqlUtils.extractSqlLong(lhigh) - MetastoreDirectSqlUtils .extractSqlLong(llow)) / MetaStoreDirectSql.extractSqlDouble(avgLong)); if (estimation < lowerBound) { estimation = lowerBound; @@ -673,19 +672,19 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData data.setDateStats(dateStats); } else if (colType.equals("double") || colType.equals("float")) { DoubleColumnStatsDataInspector doubleStats = new DoubleColumnStatsDataInspector(); - doubleStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + doubleStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); if (dhigh != null) { doubleStats.setHighValue(MetaStoreDirectSql.extractSqlDouble(dhigh)); } if (dlow != null) { doubleStats.setLowValue(MetaStoreDirectSql.extractSqlDouble(dlow)); } - long lowerBound = MetaStoreDirectSql.extractSqlLong(dist); - long higherBound = MetaStoreDirectSql.extractSqlLong(sumDist); + long lowerBound = MetastoreDirectSqlUtils.extractSqlLong(dist); + long higherBound = MetastoreDirectSqlUtils.extractSqlLong(sumDist); if (useDensityFunctionForNDVEstimation && dhigh != null && dlow != null && avgDouble != null && MetaStoreDirectSql.extractSqlDouble(avgDouble) != 0.0) { - long estimation = MetaStoreDirectSql - .extractSqlLong((MetaStoreDirectSql.extractSqlLong(dhigh) - MetaStoreDirectSql + long estimation = MetastoreDirectSqlUtils + .extractSqlLong((MetastoreDirectSqlUtils.extractSqlLong(dhigh) - MetastoreDirectSqlUtils .extractSqlLong(dlow)) / MetaStoreDirectSql.extractSqlDouble(avgDouble)); if (estimation < lowerBound) { doubleStats.setNumDVs(lowerBound); @@ -700,7 +699,7 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData data.setDoubleStats(doubleStats); } else if (colType.startsWith("decimal")) { DecimalColumnStatsDataInspector decimalStats = new DecimalColumnStatsDataInspector(); - decimalStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); + decimalStats.setNumNulls(MetastoreDirectSqlUtils.extractSqlLong(nulls)); Decimal low = null; Decimal high = null; BigDecimal blow = null; @@ -722,11 +721,11 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData low = DecimalUtils.createThriftDecimal((String) declow); } decimalStats.setLowValue(low); - long lowerBound = MetaStoreDirectSql.extractSqlLong(dist); - long higherBound = MetaStoreDirectSql.extractSqlLong(sumDist); + long lowerBound = MetastoreDirectSqlUtils.extractSqlLong(dist); + long higherBound = MetastoreDirectSqlUtils.extractSqlLong(sumDist); if (useDensityFunctionForNDVEstimation && dechigh != null && declow != null && avgDecimal != null && MetaStoreDirectSql.extractSqlDouble(avgDecimal) != 0.0) { - long estimation = MetaStoreDirectSql.extractSqlLong(MetaStoreDirectSql.extractSqlLong(bhigh + long estimation = MetastoreDirectSqlUtils.extractSqlLong(MetastoreDirectSqlUtils.extractSqlLong(bhigh .subtract(blow).floatValue() / MetaStoreDirectSql.extractSqlDouble(avgDecimal))); if (estimation < lowerBound) { decimalStats.setNumDVs(lowerBound); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 0445cbf9095285bdcde72946f1b6dd9a9a3b9fff..98f674a04b39f0de9d2c968c13eaa8038fcdde91 100644 --- 
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1253,6 +1253,12 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, } @Override + public List getPartitionSpecsByFilterAndProjection(Table table, boolean allowSql, boolean allowJdo, + List fieldList, String paramKeys, boolean excludeParamKeysFlag) throws MetaException, NoSuchObjectException { + return rawStore.getPartitionSpecsByFilterAndProjection(table, allowSql, allowJdo, fieldList, paramKeys, excludeParamKeysFlag); + } + + @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { catName = StringUtils.normalizeIdentifier(catName); diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java index 68f07e2569b6531cf3e18919209aed1a17e88bf7..b8895dff20d690f22900ced3a8de015f8742e49a 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java @@ -29,6 +29,9 @@ private String deserializerClass; private int serdeType; + public MSerDeInfo() { + //default constructor used for deserialization + } /** * * @param name diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 5233bee59220244e89f05b6c4dbf86a2cc6dc9fe..8c000d80457dcbf4f4fc46c9397441cb40a910e2 100644 --- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hive.metastore.utils; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.Multimaps; +import org.apache.commons.beanutils.PropertyUtils; +import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD; +import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD; import org.apache.hadoop.hive.metastore.api.WMPoolSchedulingPolicy; import com.google.common.base.Joiner; @@ -44,20 +51,28 @@ import javax.annotation.Nullable; +import java.beans.PropertyDescriptor; import java.io.File; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; import java.net.URL; import java.net.URLClassLoader; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Properties; import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; public class MetaStoreUtils { /** A fixed date 
format to be used for hive partition column values. */ @@ -834,6 +849,18 @@ public static WMPoolSchedulingPolicy parseSchedulingPolicy(String schedulingPoli return Enum.valueOf(WMPoolSchedulingPolicy.class, schedulingPolicy); } + /** + * Helper method for preparing for "SOMETHING_ID in (...)" to use in future queries. + * @param objectIds the objectId collection + * @return The concatenated list + * @throws MetaException If the list contains wrong data + */ + public static String getIdListForIn(List objectIds) throws MetaException { + return objectIds.stream() + .map(i -> i.toString()) + .collect(Collectors.joining(",")); + } + private static boolean hasCatalogName(String dbName) { return dbName != null && dbName.length() > 0 && dbName.charAt(0) == CATALOG_DB_THRIFT_NAME_MARKER; @@ -920,6 +947,203 @@ public static String prependCatalogToDbName(String dbName, Configuration conf) { } } + private static class StorageDescriptorKey { + + private StorageDescriptor sd; + private String serializationLib; + private String inputFormat; + private String outputFormat; + private String baseLocation; + private List cols; + + StorageDescriptorKey(StorageDescriptor sd) { + this.sd = sd; + inputFormat = sd.getInputFormat(); + outputFormat = sd.getOutputFormat(); + serializationLib = sd.getSerdeInfo() == null ? null : sd.getSerdeInfo().getSerializationLib(); + cols = sd.getCols(); + baseLocation = sd.getLocation(); + } + + StorageDescriptorKey(String baseLocation, StorageDescriptor sd) { + this.sd = sd; + inputFormat = sd.getInputFormat(); + outputFormat = sd.getOutputFormat(); + serializationLib = sd.getSerdeInfo() == null ? null : sd.getSerdeInfo().getSerializationLib(); + cols = sd.getCols(); + //use the baseLocation provided instead of location from sd + this.baseLocation = baseLocation; + } + + StorageDescriptorKey() { + //all fields are null + } + + StorageDescriptor getSd() { + return sd; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + StorageDescriptorKey that = (StorageDescriptorKey) o; + return Objects.equals(serializationLib, that.serializationLib) && Objects + .equals(inputFormat, that.inputFormat) && Objects.equals(outputFormat, that.outputFormat) + && Objects.equals(baseLocation, that.baseLocation) && Objects.equals(cols, that.cols); + } + + @Override + public int hashCode() { + return Objects.hash(serializationLib, inputFormat, outputFormat, baseLocation, cols); + } + } + + private static final char DOT = '.'; + public static void setNestedProperty(Object bean, String propertyName, Object value, + boolean instantiateMissingFields) throws MetaException { + try { + String[] nestedFields = propertyName.split("\\."); + //check if there are more than one nested levels + if (nestedFields.length > 1 && instantiateMissingFields) { + StringBuilder fieldNameBuilder = new StringBuilder(); + //check if all the nested levels until the given fieldName is set + for (int level = 0; level get_partitionspecs_grouped_by_storage_descriptor(Table table, + Collection partitions) throws MetaException { + final String tablePath = table.getSd().getLocation(); + final StorageDescriptorKey unsetSDKey = new StorageDescriptorKey(); + + ImmutableListMultimap partitionsWithinTableDirectory = + Multimaps.index(partitions, input -> { + // if sd is not in the list of projected fields, all the partitions + // can be just grouped in PartitionSpec object + if (input.getSd() == null) { + return unsetSDKey; + } + // if the 
partition is within table, use the tableSDKey to group it with other partitions + // within the table directory + if (input.getSd().getLocation() != null && input.getSd().getLocation() + .startsWith(tablePath)) { + return new StorageDescriptorKey(tablePath, input.getSd()); + } + // if partitions are located outside table location we treat them as non-standard + // and do not perform any grouping + // if the location is not set partitions are grouped according to the rest of the SD fields + return new StorageDescriptorKey(input.getSd()); + }); + + List partSpecs = new ArrayList<>(); + + // Classify partitions based on shared SD properties. + Map> sdToPartList + = new HashMap<>(); + // we don't expect partitions to exist outside directory in most cases + List partitionsOutsideTableDir = new ArrayList<>(0); + for (StorageDescriptorKey key : partitionsWithinTableDirectory.keySet()) { + boolean isUnsetKey = key.equals(unsetSDKey); + // group the partitions together when + // case I : sd is not set because it was not in the requested fields + // case II : when sd.location is not set because it was not in the requested fields + // case III : when sd.location is set and it is located within table directory + if (isUnsetKey || key.baseLocation == null || key.baseLocation.equals(tablePath)) { + for (Partition partition : partitionsWithinTableDirectory.get(key)) { + + PartitionWithoutSD partitionWithoutSD + = new PartitionWithoutSD(); + partitionWithoutSD.setValues(partition.getValues()); + partitionWithoutSD.setCreateTime(partition.getCreateTime()); + partitionWithoutSD.setLastAccessTime(partition.getLastAccessTime()); + partitionWithoutSD.setRelativePath( + (isUnsetKey || !partition.getSd().isSetLocation()) ? null : partition.getSd() + .getLocation().substring(tablePath.length())); + partitionWithoutSD.setParameters(partition.getParameters()); + + if (!sdToPartList.containsKey(key)) { + sdToPartList.put(key, new ArrayList<>()); + } + sdToPartList.get(key).add(partitionWithoutSD); + } + } else { + // Lump all partitions outside the tablePath into one PartSpec. + // if non-standard partitions need not be deDuped create PartitionListComposingSpec + // this will be used mostly for keeping backwards compatibility with some HMS APIs which use + // PartitionListComposingSpec for non-standard partitions located outside table + partitionsOutsideTableDir.addAll(partitionsWithinTableDirectory.get(key)); + } + } + // create sharedSDPartSpec for all the groupings + for (Map.Entry> entry : sdToPartList + .entrySet()) { + partSpecs.add(getSharedSDPartSpec(table, entry.getKey(), entry.getValue())); + } + if (!partitionsOutsideTableDir.isEmpty()) { + PartitionSpec partListSpec = new PartitionSpec(); + partListSpec.setDbName(table.getDbName()); + partListSpec.setTableName(table.getTableName()); + partListSpec.setPartitionList(new PartitionListComposingSpec(partitionsOutsideTableDir)); + partSpecs.add(partListSpec); + } + return partSpecs; + } + + + private static PartitionSpec getSharedSDPartSpec(Table table, StorageDescriptorKey sdKey, List partitions) { + StorageDescriptor sd; + if (sdKey.getSd() == null) { + //sd is not requested set it empty StorageDescriptor in the PartitionSpec + sd = new StorageDescriptor(); + } else { + sd = new StorageDescriptor(sdKey.getSd()); + sd.setLocation(sdKey.baseLocation); // Use table-dir as root-dir. 
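[Editorial note, not part of the patch] The grouping performed by get_partitionspecs_grouped_by_storage_descriptor above can be consumed roughly as in the sketch below; the thrift accessors are assumptions inferred from the setters used in this method, and the helper is assumed to be public as its use from the metastore server suggests:

import java.util.List;

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

public class PartitionSpecGroupingSketch {
  // Partitions under the table directory that share SD properties collapse into a single
  // PartitionSpecWithSharedSD; partitions located outside the table directory are returned
  // together in one PartitionListComposingSpec.
  static void describe(Table table, List<Partition> parts) throws MetaException {
    List<PartitionSpec> specs =
        MetaStoreUtils.get_partitionspecs_grouped_by_storage_descriptor(table, parts);
    for (PartitionSpec spec : specs) {
      if (spec.isSetSharedSDPartitionSpec()) {
        System.out.println("shared-SD group of "
            + spec.getSharedSDPartitionSpec().getPartitionsSize()
            + " partitions rooted at " + spec.getRootPath());
      } else {
        System.out.println("non-standard partitions: "
            + spec.getPartitionList().getPartitionsSize());
      }
    }
  }
}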
+ } + PartitionSpecWithSharedSD sharedSDPartSpec = + new PartitionSpecWithSharedSD(); + sharedSDPartSpec.setPartitions(partitions); + sharedSDPartSpec.setSd(sd); + + PartitionSpec ret = new PartitionSpec(); + ret.setRootPath(sd.getLocation()); + ret.setSharedSDPartitionSpec(sharedSDPartSpec); + ret.setDbName(table.getDbName()); + ret.setTableName(table.getTableName()); + + return ret; + } /** * Position in the array returned by {@link #parseDbName} that has the catalog name. */ diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 0934aeb3a7d5413cacde500a5575e4f676306bd0..aba3c0082a07dc523a972e3b6e3e872be65ec1d4 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -19,6 +19,20 @@ package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.common.TableName; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.ISchemaName; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.Catalog; @@ -377,6 +391,13 @@ public Partition alterPartition(String catName, String dbName, String tblName, L } @Override + public List getPartitionSpecsByFilterAndProjection(Table table, boolean allowSql, + boolean allowJdo, List fieldList, String paramKeys, boolean excludeFlag) + throws MetaException, NoSuchObjectException { + return objectStore.getPartitionSpecsByFilterAndProjection(table, allowSql, allowJdo, fieldList, paramKeys, excludeFlag); + } + + @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 70a17f51b9b5a9fb0b5640988318fd39a82b895d..f224d724117ca1d35e90d95880d78c8361b94d59 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -380,6 +380,13 @@ public Partition alterPartition(String catName, String db_name, String tbl_name, } @Override + public List getPartitionSpecsByFilterAndProjection(Table table, boolean allowSql, + boolean allowJdo, List fieldList, String paramKeys, boolean excludeFlag) + throws MetaException, NoSuchObjectException { + return Collections.emptyList(); + } + + 
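[Editorial note, not part of the patch] At the client level the projection travels in a GetPartitionsRequest, as exercised by the new TestGetPartitionsUsingProjection below and stubbed as unsupported in HiveMetaStoreClientPreCatalog. A hedged usage sketch; the setter and getter names are assumptions based on the generated thrift classes the test imports, and the db/table names reuse the test's constants:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectSpec;
import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse;
import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.thrift.TException;

public class GetPartitionsWithSpecsSketch {
  static List<PartitionSpec> projectedPartitions(HiveMetaStoreClient client) throws TException {
    GetPartitionsProjectSpec projection = new GetPartitionsProjectSpec();
    // Only these Partition fields should be materialized by the server.
    projection.setFieldList(Arrays.asList("values", "sd.location", "parameters"));
    // Optionally drop parameter keys matching a pattern (pattern semantics are server-side).
    projection.setExcludeParamKeyPattern("exclude%");

    GetPartitionsRequest request = new GetPartitionsRequest();
    request.setDbName("test_projection_db");
    request.setTblName("test_projection_table");
    request.setProjectionSpec(projection);

    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    // The response groups partitions by shared storage descriptor where possible.
    return response.getPartitionSpec();
  }
}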
@Override public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 35abd006d41f0eca354123fcfe6f590867f80cac..1994b2ccd5d9b5d014f24461555de836c8358d20 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -3528,4 +3528,10 @@ public void truncateTable(String dbName, String tableName, throws TException { throw new UnsupportedOperationException(); } + + @Override + public GetPartitionsResponse getPartitionsWithSpecs(GetPartitionsRequest request) + throws TException { + throw new UnsupportedOperationException(); + } } diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjection.java new file mode 100644 index 0000000000000000000000000000000000000000..bc7656e7915c2ef9e35522aa094960c1da5934cf --- /dev/null +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestGetPartitionsUsingProjection.java @@ -0,0 +1,587 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.commons.beanutils.PropertyUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; +import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectSpec; +import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD; +import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import static org.apache.hadoop.hive.metastore.ColumnType.SERIALIZATION_FORMAT; + +@Category(MetastoreCheckinTest.class) +public class TestGetPartitionsUsingProjection { + private static final Logger LOG = LoggerFactory.getLogger(TestGetPartitionsUsingProjection.class); + protected static Configuration conf = MetastoreConf.newMetastoreConf(); + private static int port; + private static final String dbName = "test_projection_db"; + private static final String tblName = "test_projection_table"; + private List origPartitions; + private Table tbl; + private static final String EXCLUDE_KEY_PREFIX = "exclude"; + private HiveMetaStoreClient client; + + @BeforeClass + public static void startMetaStoreServer() throws Exception { + conf.set("hive.in.test", "true"); + MetaStoreTestUtils.setConfForStandloneMode(conf); + MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2); + MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, 100); + MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class"); + port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + LOG.info("Starting MetaStore Server on port " + port); + + try (HiveMetaStoreClient client = createClient()) { + new DatabaseBuilder().setName(dbName).create(client, conf); + } + } + + @AfterClass + public static void tearDown() throws Exception { + try (HiveMetaStoreClient client = createClient()) { + client.dropDatabase(dbName, true, true, true); + } + } + + @Before + public void setup() throws TException { + // This is 
default case with setugi off for both client and server + client = createClient(); + createTestTables(); + origPartitions = client.listPartitions(dbName, tblName, (short) -1); + tbl = client.getTable(dbName, tblName); + } + + @After + public void cleanup() { + dropTestTables(); + client.close(); + client = null; + } + + private void dropTestTables() { + try { + client.dropTable(dbName, tblName); + } catch (TException e) { + // ignored + } + } + + private void createTestTables() throws TException { + if (client.tableExists(dbName, tblName)) { + LOG.info("Table is already existing. Dropping it and then recreating"); + client.dropTable(dbName, tblName); + } + new TableBuilder().setTableName(tblName).setDbName(dbName).setCols(Arrays + .asList(new FieldSchema("col1", "string", "c1 comment"), + new FieldSchema("col2", "int", "c2 comment"))).setPartCols(Arrays + .asList(new FieldSchema("state", "string", "state comment"), + new FieldSchema("city", "string", "city comment"))) + .setTableParams(new HashMap(2) {{ + put("tableparam1", "tableval1"); + put("tableparam2", "tableval2"); + }}).setBucketCols(Arrays.asList("col1")).addSortCol("col2", 1) + .addSerdeParam(SERIALIZATION_FORMAT, "1").setSerdeName(tblName) + .setSerdeLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe") + .setInputFormat("org.apache.hadoop.hive.ql.io.HiveInputFormat") + .setOutputFormat("org.apache.hadoop.hive.ql.io.HiveOutputFormat").create(client, conf); + + Table table = client.getTable(dbName, tblName); + Assert.assertTrue("Table " + dbName + "." + tblName + " does not exist", + client.tableExists(dbName, tblName)); + + List partitions = new ArrayList<>(); + partitions.add(createPartition(Arrays.asList("CA", "SanFrancisco"), table)); + partitions.add(createPartition(Arrays.asList("CA", "PaloAlto"), table)); + partitions.add(createPartition(Arrays.asList("WA", "Seattle"), table)); + partitions.add(createPartition(Arrays.asList("AZ", "Phoenix"), table)); + + client.add_partitions(partitions); + } + + private Partition createPartition(List vals, Table table) throws MetaException { + return new PartitionBuilder().inTable(table).setValues(vals).addPartParam("key1", "S1") + .addPartParam("key2", "S2").addPartParam(EXCLUDE_KEY_PREFIX + "key1", "e1") + .addPartParam(EXCLUDE_KEY_PREFIX + "key2", "e2") + .setBucketCols(table.getSd().getBucketCols()).setSortCols(table.getSd().getSortCols()) + .setSerdeName(table.getSd().getSerdeInfo().getName()) + .setSerdeLib(table.getSd().getSerdeInfo().getSerializationLib()) + .setSerdeParams(table.getSd().getSerdeInfo().getParameters()).build(conf); + } + + private static HiveMetaStoreClient createClient() throws MetaException { + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, false); + return new HiveMetaStoreClient(conf); + } + + @Test + public void testGetPartitions() throws TException { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + validateBasic(response); + } + + @Test + public void testPartitionProjectionEmptySpec() throws Throwable { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + + projectSpec.setFieldList(new ArrayList<>(0)); + projectSpec.setParamKeyPattern("exclude%"); + projectSpec.setExcludeParamKeyPattern(true); + + GetPartitionsResponse response; + response = client.getPartitionsWithSpecs(request); + 
Assert.assertTrue(response.getPartitionSpec().size() == 1); + PartitionSpec partitionSpec = response.getPartitionSpec().get(0); + PartitionSpecWithSharedSD partitionSpecWithSharedSD = partitionSpec.getSharedSDPartitionSpec(); + + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + Assert.assertNotNull(sharedSD); + // everything except location in sharedSD should be same + StorageDescriptor origSd = origPartitions.get(0).getSd().deepCopy(); + origSd.unsetLocation(); + StorageDescriptor sharedSDCopy = sharedSD.deepCopy(); + sharedSDCopy.unsetLocation(); + Assert.assertEquals(origSd, sharedSDCopy); + + List partitionWithoutSDS = partitionSpecWithSharedSD.getPartitions(); + Assert.assertNotNull(partitionWithoutSDS); + Assert.assertTrue("Unexpected number of partitions returned", + partitionWithoutSDS.size() == origPartitions.size()); + for (int i = 0; i < origPartitions.size(); i++) { + Partition origPartition = origPartitions.get(i); + PartitionWithoutSD retPartition = partitionWithoutSDS.get(i); + Assert.assertEquals(origPartition.getCreateTime(), retPartition.getCreateTime()); + Assert.assertEquals(origPartition.getLastAccessTime(), retPartition.getLastAccessTime()); + Assert.assertEquals(origPartition.getSd().getLocation(), + sharedSD.getLocation() + retPartition.getRelativePath()); + validateMap(origPartition.getParameters(), retPartition.getParameters()); + validateList(origPartition.getValues(), retPartition.getValues()); + } + } + + @Test + public void testPartitionProjectionAllSingleValuedFields() throws Throwable { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + + List projectedFields = Arrays + .asList("dbName", "tableName", "createTime", "lastAccessTime", "sd.location", + "sd.inputFormat", "sd.outputFormat", "sd.compressed", "sd.numBuckets", + "sd.serdeInfo.name", "sd.serdeInfo.serializationLib"/*, "sd.serdeInfo.serdeType"*/); + //TODO directSQL does not support serdeType, serializerClass and deserializerClass in serdeInfo + projectSpec.setFieldList(projectedFields); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + Assert.assertTrue(response.getPartitionSpec().size() == 1); + PartitionSpec partitionSpec = response.getPartitionSpec().get(0); + Assert.assertTrue("DbName is not set", partitionSpec.isSetDbName()); + Assert.assertTrue("tableName is not set", partitionSpec.isSetTableName()); + PartitionSpecWithSharedSD partitionSpecWithSharedSD = partitionSpec.getSharedSDPartitionSpec(); + + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + Assert.assertNotNull(sharedSD); + List partitionWithoutSDS = partitionSpecWithSharedSD.getPartitions(); + Assert.assertNotNull(partitionWithoutSDS); + Assert.assertTrue(partitionWithoutSDS.size() == origPartitions.size()); + int i = 0; + for (Partition origPart : origPartitions) { + for (String projectField : projectedFields) { + // dbname, tableName and catName is not stored in partition + if (projectField.equals("dbName") || projectField.equals("tableName") || projectField + .equals("catName")) + continue; + if (projectField.startsWith("sd")) { + String sdPropertyName = projectField.substring(projectField.indexOf("sd.") + 3); + if (sdPropertyName.equals("location")) { + // in case of location sharedSD has the base location and partition has relative location + Assert.assertEquals("Location does not match", origPart.getSd().getLocation(), + sharedSD.getLocation() + 
partitionWithoutSDS.get(i).getRelativePath()); + } else { + Assert.assertEquals(PropertyUtils.getNestedProperty(origPart, projectField), + PropertyUtils.getNestedProperty(sharedSD, sdPropertyName)); + } + } else { + Assert.assertEquals(PropertyUtils.getNestedProperty(origPart, projectField), + PropertyUtils.getNestedProperty(partitionWithoutSDS.get(i), projectField)); + } + } + i++; + } + } + + @Test + public void testPartitionProjectionAllMultiValuedFields() throws Throwable { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + List projectedFields = Arrays + .asList("values", "parameters", "sd.cols", "sd.bucketCols", "sd.sortCols", "sd.parameters", + "sd.skewedInfo", "sd.serdeInfo.parameters"); + projectSpec.setFieldList(projectedFields); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + + Assert.assertTrue(response.getPartitionSpec().size() == 1); + PartitionSpec partitionSpec = response.getPartitionSpec().get(0); + PartitionSpecWithSharedSD partitionSpecWithSharedSD = partitionSpec.getSharedSDPartitionSpec(); + Assert.assertEquals(origPartitions.size(), partitionSpecWithSharedSD.getPartitions().size()); + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + for (int i = 0; i < origPartitions.size(); i++) { + Partition origPartition = origPartitions.get(i); + PartitionWithoutSD retPartition = partitionSpecWithSharedSD.getPartitions().get(i); + for (String projectedField : projectedFields) { + switch (projectedField) { + case "values": + validateList(origPartition.getValues(), retPartition.getValues()); + break; + case "parameters": + validateMap(origPartition.getParameters(), retPartition.getParameters()); + break; + case "sd.cols": + validateList(origPartition.getSd().getCols(), sharedSD.getCols()); + break; + case "sd.bucketCols": + validateList(origPartition.getSd().getBucketCols(), sharedSD.getBucketCols()); + break; + case "sd.sortCols": + validateList(origPartition.getSd().getSortCols(), sharedSD.getSortCols()); + break; + case "sd.parameters": + validateMap(origPartition.getSd().getParameters(), sharedSD.getParameters()); + break; + case "sd.skewedInfo": + if (!origPartition.getSd().getSkewedInfo().getSkewedColNames().isEmpty()) { + validateList(origPartition.getSd().getSkewedInfo().getSkewedColNames(), + sharedSD.getSkewedInfo().getSkewedColNames()); + } + if (!origPartition.getSd().getSkewedInfo().getSkewedColValues().isEmpty()) { + for (int i1 = 0; + i1 < origPartition.getSd().getSkewedInfo().getSkewedColValuesSize(); i1++) { + validateList(origPartition.getSd().getSkewedInfo().getSkewedColValues().get(i1), + sharedSD.getSkewedInfo().getSkewedColValues().get(i1)); + } + } + if (!origPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps().isEmpty()) { + validateMap(origPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps(), + sharedSD.getSkewedInfo().getSkewedColValueLocationMaps()); + } + break; + case "sd.serdeInfo.parameters": + validateMap(origPartition.getSd().getSerdeInfo().getParameters(), + sharedSD.getSerdeInfo().getParameters()); + break; + default: + throw new IllegalArgumentException("Invalid field " + projectedField); + } + } + } + } + + @Test + public void testPartitionProjectionIncludeParameters() throws Throwable { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + projectSpec + .setFieldList(Arrays.asList("dbName", "tableName", "catName", 
"parameters", "values")); + projectSpec.setParamKeyPattern(EXCLUDE_KEY_PREFIX + "%"); + projectSpec.setExcludeParamKeyPattern(false); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + Assert.assertNotNull("All the partitions should be returned in sharedSD spec", + partitionSpecWithSharedSD); + PartitionListComposingSpec partitionListComposingSpec = + response.getPartitionSpec().get(0).getPartitionList(); + Assert.assertNull("Partition list composing spec should be null since all the " + + "partitions are expected to be in sharedSD spec", partitionListComposingSpec); + for (PartitionWithoutSD retPartion : partitionSpecWithSharedSD.getPartitions()) { + Assert.assertTrue("included parameter key is not found in the response", + retPartion.getParameters().containsKey(EXCLUDE_KEY_PREFIX + "key1")); + Assert.assertTrue("included parameter key is not found in the response", + retPartion.getParameters().containsKey(EXCLUDE_KEY_PREFIX + "key2")); + Assert.assertTrue("Additional parameters returned other than inclusion keys", + retPartion.getParameters().size() == 2); + } + } + + @Test + public void testPartitionProjectionExcludeParameters() throws Throwable { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + projectSpec + .setFieldList(Arrays.asList("dbName", "tableName", "catName", "parameters", "values")); + projectSpec.setParamKeyPattern(EXCLUDE_KEY_PREFIX + "%"); + projectSpec.setExcludeParamKeyPattern(true); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + Assert.assertNotNull("All the partitions should be returned in sharedSD spec", + partitionSpecWithSharedSD); + PartitionListComposingSpec partitionListComposingSpec = + response.getPartitionSpec().get(0).getPartitionList(); + Assert.assertNull("Partition list composing spec should be null", partitionListComposingSpec); + for (PartitionWithoutSD retPartion : partitionSpecWithSharedSD.getPartitions()) { + Assert.assertFalse("excluded parameter key is found in the response", + retPartion.getParameters().containsKey(EXCLUDE_KEY_PREFIX + "key1")); + Assert.assertFalse("excluded parameter key is found in the response", + retPartion.getParameters().containsKey(EXCLUDE_KEY_PREFIX + "key2")); + } + } + + @Test + public void testNestedMultiValuedFieldProjection() throws TException { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + projectSpec.setFieldList(Arrays.asList("sd.cols.name", "sd.cols.type")); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + Assert.assertNotNull("sd.cols were requested but was not returned", sharedSD.getCols()); + for (FieldSchema col : sharedSD.getCols()) { + Assert.assertTrue("sd.cols.name was requested but was not returned", col.isSetName()); + Assert.assertTrue("sd.cols.type was requested but was not returned", col.isSetType()); + Assert.assertFalse("sd.cols.comment was not requested but was returned", col.isSetComment()); + } + } + + @Test + 
public void testParameterExpansion() throws TException { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + projectSpec.setFieldList(Arrays.asList("sd.cols", "sd.serdeInfo")); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + Assert.assertNotNull("sd.cols were requested but was not returned", sharedSD.getCols()); + Assert.assertEquals("Returned serdeInfo does not match with original serdeInfo", + origPartitions.get(0).getSd().getCols(), sharedSD.getCols()); + + Assert + .assertNotNull("sd.serdeInfo were requested but was not returned", sharedSD.getSerdeInfo()); + Assert.assertEquals("Returned serdeInfo does not match with original serdeInfo", + origPartitions.get(0).getSd().getSerdeInfo(), sharedSD.getSerdeInfo()); + } + + @Test + public void testNonStandardPartitions() throws TException { + String testTblName = "test_non_standard"; + new TableBuilder().setTableName(testTblName).setDbName(dbName) + .addCol("ns_c1", "string", "comment 1").addCol("ns_c2", "int", "comment 2") + .addPartCol("part", "string").addPartCol("city", "string").addBucketCol("ns_c1") + .addSortCol("ns_c2", 1).addTableParam("tblparamKey", + "Partitions of this table are not located within table directory").create(client, conf); + + Table table = client.getTable(dbName, testTblName); + Assert.assertNotNull("Unable to create a test table ", table); + + List partitions = new ArrayList<>(); + partitions.add(createPartition(Arrays.asList("p1", "SanFrancisco"), table)); + partitions.add(createPartition(Arrays.asList("p1", "PaloAlto"), table)); + partitions.add(createPartition(Arrays.asList("p2", "Seattle"), table)); + partitions.add(createPartition(Arrays.asList("p2", "Phoenix"), table)); + + client.add_partitions(partitions); + // change locations of two of the partitions outside table directory + List testPartitions = client.listPartitions(dbName, testTblName, (short) -1); + Assert.assertEquals(4, testPartitions.size()); + Partition p1 = testPartitions.get(2); + p1.getSd().setLocation("/tmp/some_other_location/part=p2/city=Seattle"); + Partition p2 = testPartitions.get(3); + p2.getSd().setLocation("/tmp/some_other_location/part=p2/city=Phoenix"); + client.alter_partitions(dbName, testTblName, Arrays.asList(p1, p2)); + + GetPartitionsRequest request = getGetPartitionsRequest(); + request.getProjectionSpec().setFieldList(Arrays.asList("values", "sd")); + request.getFilterSpec().setDbName(dbName); + request.getFilterSpec().setTblName(testTblName); + + GetPartitionsResponse response = client.getPartitionsWithSpecs(request); + Assert.assertNotNull("Response should have returned partition specs", + response.getPartitionSpec()); + Assert + .assertEquals("We should have two partition specs", 2, response.getPartitionSpec().size()); + Assert.assertNotNull("One SharedSD spec is expected", + response.getPartitionSpec().get(0).getSharedSDPartitionSpec()); + Assert.assertNotNull("One composing spec is expected", + response.getPartitionSpec().get(1).getPartitionList()); + + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + Assert.assertNotNull("sd was requested but not returned", partitionSpecWithSharedSD.getSd()); + Assert.assertEquals("shared SD should have table location", 
table.getSd().getLocation(), + partitionSpecWithSharedSD.getSd().getLocation()); + List> expectedVals = new ArrayList<>(2); + expectedVals.add(Arrays.asList("p1", "PaloAlto")); + expectedVals.add(Arrays.asList("p1", "SanFrancisco")); + + int i = 0; + for (PartitionWithoutSD retPartition : partitionSpecWithSharedSD.getPartitions()) { + Assert.assertEquals(2, retPartition.getValuesSize()); + validateList(expectedVals.get(i), retPartition.getValues()); + Assert.assertNull("parameters were not requested so should have been null", + retPartition.getParameters()); + i++; + } + + PartitionListComposingSpec composingSpec = + response.getPartitionSpec().get(1).getPartitionList(); + Assert.assertNotNull("composing spec should have returend 2 partitions", + composingSpec.getPartitions()); + Assert.assertEquals("composing spec should have returend 2 partitions", 2, + composingSpec.getPartitionsSize()); + + expectedVals.clear(); + expectedVals.add(Arrays.asList("p2", "Phoenix")); + expectedVals.add(Arrays.asList("p2", "Seattle")); + i = 0; + + for (Partition partition : composingSpec.getPartitions()) { + Assert.assertEquals(2, partition.getValuesSize()); + validateList(expectedVals.get(i), partition.getValues()); + Assert.assertNull("parameters were not requested so should have been null", + partition.getParameters()); + i++; + } + } + + @Test(expected = TException.class) + public void testInvalidProjectFieldNames() throws TException { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + projectSpec.setFieldList(Arrays.asList("values", "invalid.field.name")); + client.getPartitionsWithSpecs(request); + } + + @Test(expected = TException.class) + public void testInvalidProjectFieldNames2() throws TException { + GetPartitionsRequest request = getGetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = request.getProjectionSpec(); + projectSpec.setFieldList(Arrays.asList("")); + client.getPartitionsWithSpecs(request); + } + + private void validateBasic(GetPartitionsResponse response) throws TException { + Assert.assertNotNull("Response is null", response); + Assert.assertNotNull("Returned partition spec is null", response.getPartitionSpec()); + Assert.assertEquals(1, response.getPartitionSpecSize()); + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + Assert.assertNotNull(partitionSpecWithSharedSD.getSd()); + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + Assert.assertEquals("Root location should be set to table location", tbl.getSd().getLocation(), + sharedSD.getLocation()); + + List partitionWithoutSDS = partitionSpecWithSharedSD.getPartitions(); + Assert.assertEquals(origPartitions.size(), partitionWithoutSDS.size()); + for (int i = 0; i < origPartitions.size(); i++) { + Partition origPartition = origPartitions.get(i); + PartitionWithoutSD returnedPartitionWithoutSD = partitionWithoutSDS.get(i); + Assert.assertEquals(String.format("Location returned for Partition %d is not correct", i), + origPartition.getSd().getLocation(), + sharedSD.getLocation() + returnedPartitionWithoutSD.getRelativePath()); + } + } + + private GetPartitionsRequest getGetPartitionsRequest() { + GetPartitionsRequest request = new GetPartitionsRequest(); + request.setProjectionSpec(new GetPartitionsProjectSpec()); + request.setFilterSpec(new GetPartitionsFilterSpec()); + request.getFilterSpec().setTblName(tblName); + request.getFilterSpec().setDbName(dbName); 
+ return request; + } + + private void validateMap(Map aMap, Map bMap) { + if ((aMap == null || aMap.isEmpty()) && (bMap == null || bMap.isEmpty())) { + return; + } + // Equality is verified here because metastore updates stats automatically + // and adds them in the returned partition. So the returned partition will + // have parameters + some more parameters for the basic stats + Assert.assertTrue(bMap.size() >= aMap.size()); + for (Entry entries : aMap.entrySet()) { + Assert.assertTrue("Expected " + entries.getKey() + " is missing from the map", + bMap.containsKey(entries.getKey())); + Assert.assertEquals("Expected value to be " + aMap.get(entries.getKey()) + " found" + bMap + .get(entries.getKey()), aMap.get(entries.getKey()), bMap.get(entries.getKey())); + } + } + + private void validateList(List aList, List bList) { + if ((aList == null || aList.isEmpty()) && (bList == null || bList.isEmpty())) { + return; + } + Assert.assertEquals(aList.size(), bList.size()); + Iterator origValuesIt = aList.iterator(); + Iterator retValuesIt = bList.iterator(); + while (origValuesIt.hasNext()) { + Assert.assertTrue(retValuesIt.hasNext()); + Assert.assertEquals(origValuesIt.next(), retValuesIt.next()); + } + } +} diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 60beab6350d1f4d86bdcf79f5119172117c5ca2e..255f1d771074c68b8768b04e0301e91c8fcf015b 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -20,6 +20,7 @@ import java.lang.reflect.Field; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; @@ -29,9 +30,11 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; @@ -40,6 +43,14 @@ import java.util.concurrent.TimeUnit; import com.google.common.collect.Sets; +import org.apache.commons.beanutils.PropertyUtils; +import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; +import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectSpec; +import org.apache.hadoop.hive.metastore.api.GetPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionsResponse; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD; +import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -121,6 +132,7 @@ public void setUp() throws Exception { conf.set("hive.key3", ""); conf.set("hive.key4", "0"); conf.set("datanucleus.autoCreateTables", "false"); + conf.set("hive.in.test", "true"); MetaStoreTestUtils.setConfForStandloneMode(conf); MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2); @@ -464,7 +476,6 @@ private static void 
verifyPartitionsPublished(HiveMetaStoreClient client, private static List makeVals(String ds, String id) { List vals4 = new ArrayList<>(2); - vals4 = new ArrayList<>(2); vals4.add(ds); vals4.add(id); return vals4; @@ -659,6 +670,127 @@ public void testListPartitionNames() throws Throwable { } + @Test + public void testGetPartitionsWithSpec() throws Throwable { + // create a table with multiple partitions + List createdPartitions = setupProjectionTestTable(); + Table tbl = client.getTable("compdb", "comptbl"); + GetPartitionsRequest request = new GetPartitionsRequest(); + GetPartitionsProjectSpec projectSpec = new GetPartitionsProjectSpec(); + projectSpec.setFieldList(Arrays + .asList("dbName", "tableName", "catName", "parameters", "lastAccessTime", "sd.location", + "values", "createTime", "sd.serdeInfo.serializationLib", "sd.cols")); + projectSpec.setParamKeyPattern("exclude%"); + projectSpec.setExcludeParamKeyPattern(true); + GetPartitionsFilterSpec filter = new GetPartitionsFilterSpec(); + filter.setDbName("compdb"); + filter.setTblName("comptbl"); + request.setFilterSpec(filter); + request.setProjectionSpec(projectSpec); + GetPartitionsResponse response; + try { + response = client.getPartitionsWithSpecs(request); + } catch (Exception ex) { + ex.printStackTrace(); + LOG.error("Exception while retriveing partitions", ex); + throw ex; + } + + Assert.assertEquals(1, response.getPartitionSpecSize()); + PartitionSpecWithSharedSD partitionSpecWithSharedSD = + response.getPartitionSpec().get(0).getSharedSDPartitionSpec(); + Assert.assertNotNull(partitionSpecWithSharedSD.getSd()); + StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd(); + Assert.assertEquals("Root location should be set to table location", tbl.getSd().getLocation(), + sharedSD.getLocation()); + Assert.assertFalse("Fields which are not requested should not be set", + sharedSD.isSetParameters()); + Assert.assertNotNull( + "serializationLib class was requested but was not found in the returned partition", + sharedSD.getSerdeInfo().getSerializationLib()); + Assert.assertNotNull("db name was requested but was not found in the returned partition", + response.getPartitionSpec().get(0).getDbName()); + Assert.assertNotNull("Table name was requested but was not found in the returned partition", + response.getPartitionSpec().get(0).getTableName()); + Assert.assertTrue("sd.cols was requested but was not found in the returned response", + partitionSpecWithSharedSD.getSd().isSetCols()); + List origSdCols = createdPartitions.get(0).getSd().getCols(); + Assert.assertEquals("Size of the requested sd.cols should be same", origSdCols.size(), + partitionSpecWithSharedSD.getSd().getCols().size()); + for (int i = 0; i < origSdCols.size(); i++) { + FieldSchema origFs = origSdCols.get(i); + FieldSchema returnedFs = partitionSpecWithSharedSD.getSd().getCols().get(i); + Assert.assertEquals("Field schemas returned different than expected", origFs, returnedFs); + } + /*Assert + .assertNotNull("Catalog name was requested but was not found in the returned partition", + response.getPartitionSpec().get(0).getCatName());*/ + + List partitionWithoutSDS = partitionSpecWithSharedSD.getPartitions(); + Assert.assertEquals(createdPartitions.size(), partitionWithoutSDS.size()); + for (int i = 0; i < createdPartitions.size(); i++) { + Partition origPartition = createdPartitions.get(i); + PartitionWithoutSD returnedPartitionWithoutSD = partitionWithoutSDS.get(i); + Assert.assertEquals(String.format("Location returned for Partition %d is not correct", i), + 
origPartition.getSd().getLocation(), + sharedSD.getLocation() + returnedPartitionWithoutSD.getRelativePath()); + Assert.assertTrue("createTime was request but is not set", + returnedPartitionWithoutSD.isSetCreateTime()); + Assert.assertTrue("Partition parameters were requested but are not set", + returnedPartitionWithoutSD.isSetParameters()); + // first partition has parameters set + if (i == 0) { + Assert.assertTrue("partition parameters not set", + returnedPartitionWithoutSD.getParameters().containsKey("key1")); + Assert.assertEquals("partition parameters does not contain included keys", "val1", + returnedPartitionWithoutSD.getParameters().get("key1")); + // excluded parameter should not be returned + Assert.assertFalse("Excluded parameter key returned", + returnedPartitionWithoutSD.getParameters().containsKey("excludeKey1")); + Assert.assertFalse("Excluded parameter key returned", + returnedPartitionWithoutSD.getParameters().containsKey("excludeKey2")); + } + List returnedVals = returnedPartitionWithoutSD.getValues(); + List actualVals = origPartition.getValues(); + for (int j = 0; j < actualVals.size(); j++) { + Assert.assertEquals(actualVals.get(j), returnedVals.get(j)); + } + } + } + + + protected List setupProjectionTestTable() throws Throwable { + //String catName = "catName"; + String dbName = "compdb"; + String tblName = "comptbl"; + String typeName = "Person"; + //String catName = "catName"; + Map dummyparams = new HashMap<>(); + dummyparams.put("key1", "val1"); + dummyparams.put("excludeKey1", "excludeVal1"); + dummyparams.put("excludeKey2", "excludeVal2"); + cleanUp(dbName, tblName, typeName); + + List> values = new ArrayList<>(); + values.add(makeVals("2008-07-01 14:13:12", "14")); + values.add(makeVals("2008-07-01 14:13:12", "15")); + values.add(makeVals("2008-07-02 14:13:12", "15")); + values.add(makeVals("2008-07-03 14:13:12", "151")); + + List createdPartitions = + createMultiPartitionTableSchema(dbName, tblName, typeName, values); + Table tbl = client.getTable(dbName, tblName); + // add some dummy parameters to one of the partitions to confirm the fetching logic is working + Partition newPartition = createdPartitions.remove(0); + //Map sdParams = new HashMap<>(); + //dummyparams.put("sdkey1", "sdval1"); + newPartition.setParameters(dummyparams); + //newPartition.getSd().setParameters(sdParams); + + client.alter_partition(dbName, tblName, newPartition); + createdPartitions.add(0, newPartition); + return createdPartitions; + } @Test public void testDropTable() throws Throwable { @@ -2743,7 +2875,11 @@ private void createMaterializedView(String dbName, String tableName, Set return partitions; } - private void createMultiPartitionTableSchema(String dbName, String tblName, + private List createMultiPartitionTableSchema(String dbName, String tblName, + String typeName, List> values) throws Throwable { + return createMultiPartitionTableSchema(null, dbName, tblName, typeName, values); + } + private List createMultiPartitionTableSchema(String catName, String dbName, String tblName, String typeName, List> values) throws Throwable { createDb(dbName); @@ -2754,6 +2890,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName, Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) + .setCatName(catName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) @@ -2768,7 +2905,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName, tbl = 
client.getTable(dbName, tblName); } - createPartitions(dbName, tbl, values); + return createPartitions(dbName, tbl, values); } @Test diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionProjectionEvaluator.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionProjectionEvaluator.java new file mode 100644 index 0000000000000000000000000000000000000000..673935f26f1625a2fa5ba902ec762284b5f6febb --- /dev/null +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionProjectionEvaluator.java @@ -0,0 +1,279 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.hive.metastore.PartitionProjectionEvaluator.PartitionFieldNode; +import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +import javax.jdo.PersistenceManager; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.PartitionProjectionEvaluator.CD_PATTERN; +import static org.apache.hadoop.hive.metastore.PartitionProjectionEvaluator.SD_PATTERN; +import static org.apache.hadoop.hive.metastore.PartitionProjectionEvaluator.SERDE_PATTERN; + +@Category(MetastoreUnitTest.class) +public class TestPartitionProjectionEvaluator { + private ImmutableMap fieldNameToColumnName = + ImmutableMap.builder() + .put("createTime", "\"PARTITIONS\"" + ".\"CREATE_TIME\"") + .put("lastAccessTime", "\"PARTITIONS\"" + ".\"LAST_ACCESS_TIME\"") + .put("sd.location", "\"SDS\"" + ".\"LOCATION\"") + .put("sd.inputFormat", "\"SDS\"" + ".\"INPUT_FORMAT\"") + .put("sd.outputFormat", "\"SDS\"" + ".\"OUTPUT_FORMAT\"") + .put("sd.storedAsSubDirectories", "\"SDS\"" + ".\"IS_STOREDASSUBDIRECTORIES\"") + .put("sd.compressed", "\"SDS\"" + ".\"IS_COMPRESSED\"") + .put("sd.numBuckets", "\"SDS\"" + ".\"NUM_BUCKETS\"") + .put("sd.serdeInfo.name", "\"SDS\"" + ".\"NAME\"") + .put("sd.serdeInfo.serializationLib", "\"SDS\"" + ".\"SLIB\"") + .put("PART_ID", "\"PARTITIONS\"" + ".\"PART_ID\"").put("SD_ID", "\"SDS\"" + ".\"SD_ID\"") + .put("SERDE_ID", "\"SERDES\"" + ".\"SERDE_ID\"").put("CD_ID", "\"SDS\"" + ".\"CD_ID\"") + .build(); + + private static void compareTreeUtil(PartitionFieldNode expected, PartitionFieldNode given) { + if (expected == null || given == null) { + Assert.assertTrue(expected == null && given == null); + } + 
Assert.assertEquals("Field names should match", expected.getFieldName(), given.getFieldName()); + Assert.assertEquals( + "Expected " + expected + " " + expected.isLeafNode() + " Given " + given + " " + given + .isLeafNode(), expected.isLeafNode(), given.isLeafNode()); + Assert.assertEquals( + "Expected " + expected + " " + expected.isMultiValued() + " Given " + given + " " + given + .isMultiValued(), expected.isMultiValued(), given.isMultiValued()); + for (PartitionFieldNode child : expected.getChildren()) { + Assert.assertTrue("given node " + given + " does not have the child node " + child, + given.getChildren().contains(child)); + int counter = 0; + for (PartitionFieldNode giveChild : given.getChildren()) { + if (child.equals(giveChild)) { + compareTreeUtil(child, giveChild); + counter++; + } + } + Assert.assertEquals("More than one copies of node " + child + " found", 1, counter); + } + } + + private static void compare(Set roots, Set giveRoots) { + Assert.assertEquals("Given roots size does not match with the size of expected number of roots", + roots.size(), giveRoots.size()); + for (PartitionFieldNode root : roots) { + Assert.assertTrue(giveRoots.contains(root)); + int counter = 0; + for (PartitionFieldNode givenRoot : giveRoots) { + if (givenRoot.equals(root)) { + compareTreeUtil(root, givenRoot); + counter++; + } + } + Assert.assertEquals("More than one copies of node found for " + root, 1, counter); + } + } + + @Test + public void testPartitionFieldNames() throws Exception { + Set results = PartitionProjectionEvaluator.getPartitionClassFields(); + Assert.assertNotNull(results); + results.removeAll(Arrays.asList("values", + "dbName", + "tableName", + "createTime", + "lastAccessTime", + "parameters", + "catName", + "sd.cols.name", + "sd.cols.type", + "sd.cols.comment", + "sd.location", + "sd.inputFormat", + "sd.outputFormat", + "sd.compressed", + "sd.numBuckets", + "sd.serdeInfo.name", + "sd.serdeInfo.serializationLib", + "sd.serdeInfo.parameters", + "sd.serdeInfo.description", + "sd.serdeInfo.serializerClass", + "sd.serdeInfo.deserializerClass", + "sd.serdeInfo.serdeType", + "sd.bucketCols", + "sd.sortCols.col", + "sd.sortCols.order", + "sd.parameters", + "sd.skewedInfo.skewedColNames", + "sd.skewedInfo.skewedColValues", + "sd.skewedInfo.skewedColValueLocationMaps", + "sd.storedAsSubDirectories")); + Assert.assertTrue("Some of the partition fields are not found " + Arrays + .toString(results.toArray(new String[results.size()])), results.isEmpty()); + } + + @Test + public void testPartitionFieldTree() throws MetaException { + PersistenceManager mockPm = Mockito.mock(PersistenceManager.class); + List projectionFields = new ArrayList<>(2); + projectionFields.add("sd.location"); + projectionFields.add("sd.parameters"); + projectionFields.add("createTime"); + projectionFields.add("sd.serdeInfo.serializationLib"); + projectionFields.add("sd.cols"); + projectionFields.add("parameters"); + PartitionProjectionEvaluator projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Set roots = projectionEvaluator.getRoots(); + + Set expected = new HashSet<>(); + PartitionFieldNode sdNode = new PartitionFieldNode("sd"); + sdNode.addChild(new PartitionFieldNode("sd.location")); + sdNode.addChild(new PartitionFieldNode("sd.parameters", true)); + sdNode.addChild(new PartitionFieldNode("sd.cols", true)); + + PartitionFieldNode serdeNode = new PartitionFieldNode("sd.serdeInfo"); + serdeNode.addChild(new 
PartitionFieldNode("sd.serdeInfo.serializationLib")); + + sdNode.addChild(serdeNode); + expected.add(sdNode); + expected.add(new PartitionFieldNode("parameters", true)); + expected.add(new PartitionFieldNode("createTime")); + expected.add(new PartitionFieldNode("PART_ID")); + expected.add(new PartitionFieldNode("SD_ID")); + expected.add(new PartitionFieldNode("CD_ID")); + compare(expected, roots); + } + + @Test + public void testProjectionCompaction() throws MetaException { + PersistenceManager mockPm = Mockito.mock(PersistenceManager.class); + List projectionFields = new ArrayList<>(2); + projectionFields.add("sd.location"); + projectionFields.add("sd.parameters"); + projectionFields.add("createTime"); + projectionFields.add("sd"); + PartitionProjectionEvaluator projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Set roots = projectionEvaluator.getRoots(); + Assert.assertFalse("sd.location should not contained since it is already included in sd", + roots.contains(new PartitionFieldNode("sd.location"))); + Assert.assertFalse("sd.parameters should not contained since it is already included in sd", + roots.contains(new PartitionFieldNode("sd.parameters"))); + } + + @Test(expected = MetaException.class) + public void testInvalidProjectFields() throws MetaException { + PersistenceManager mockPm = Mockito.mock(PersistenceManager.class); + List projectionFields = new ArrayList<>(2); + projectionFields.add("sd.location"); + projectionFields.add("sd.parameters"); + projectionFields.add("createTime"); + projectionFields.add("sd"); + projectionFields.add("invalid"); + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, + null, false); + } + + @Test + public void testFind() throws MetaException { + PersistenceManager mockPm = Mockito.mock(PersistenceManager.class); + List projectionFields = Arrays.asList("sd", "createTime", "sd.location", "parameters"); + PartitionProjectionEvaluator projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); + + projectionFields = Arrays.asList("sd", "createTime", "parameters"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.serdeInfo.serializationLib"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.location"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.location"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertFalse(projectionEvaluator.find(SERDE_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.serdeInfo.serializationLib"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, 
projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(SERDE_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.serdeInfo"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(SERDE_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertFalse(projectionEvaluator.find(SD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sdxcols"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertFalse(projectionEvaluator.find(CD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.cols"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(CD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd.cols.name"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertTrue(projectionEvaluator.find(CD_PATTERN)); + + projectionFields = Arrays.asList("createTime", "parameters", "sd", "sd.location"); + projectionEvaluator = + new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, + false, null, false); + Assert.assertFalse(projectionEvaluator.find(CD_PATTERN)); + } +}
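As a closing illustration, and not part of the patch, the sketch below mirrors the behaviour that testProjectionCompaction above asserts: dotted projection paths are collapsed so that a child such as "sd.location" is dropped once its parent "sd" is itself requested. The ProjectionCompactor class and its compact method are hypothetical names, independent of PartitionProjectionEvaluator, which builds a tree of PartitionFieldNode objects to the same effect.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical stand-alone helper: keep only projection paths whose ancestors
// are not themselves requested (e.g. "sd" subsumes "sd.location").
public final class ProjectionCompactor {
  public static List<String> compact(List<String> fields) {
    List<String> roots = new ArrayList<>();
    for (String field : fields) {
      boolean coveredByAncestor = false;
      for (String other : fields) {
        // "sd" covers "sd.location", "sd.serdeInfo" covers "sd.serdeInfo.serializationLib", ...
        if (!field.equals(other) && field.startsWith(other + ".")) {
          coveredByAncestor = true;
          break;
        }
      }
      if (!coveredByAncestor && !roots.contains(field)) {
        roots.add(field);
      }
    }
    return roots;
  }

  public static void main(String[] args) {
    // Mirrors testProjectionCompaction: "sd.location" and "sd.parameters" disappear
    // because "sd" is requested as a whole; prints [createTime, sd].
    System.out.println(compact(Arrays.asList("sd.location", "sd.parameters", "createTime", "sd")));
  }
}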