diff --git a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java index c9ef647..41d150c 100644 --- a/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java +++ b/common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java @@ -110,9 +110,13 @@ public String getAggregator(Configuration conf) { */ public static final String[] fastStats = new String[] {NUM_FILES,TOTAL_SIZE}; - // This string constant is used by stats task to indicate to AlterHandler that - // alterPartition/alterTable is happening via statsTask. - public static final String STATS_GENERATED_VIA_STATS_TASK = "STATS_GENERATED_VIA_STATS_TASK"; + // This string constant is used to indicate to AlterHandler that + // alterPartition/alterTable is happening via statsTask or via user. + public static final String STATS_GENERATED = "STATS_GENERATED"; + + public static final String TASK = "TASK"; + + public static final String USER = "USER"; // This string constant is used by AlterHandler to figure out that it should not attempt to // update stats. It is set by any client-side task which wishes to signal that no stats @@ -127,6 +131,8 @@ public String getAggregator(Configuration conf) { public static final String BASIC_STATS = "BASIC_STATS"; + public static final String CASCADE = "CASCADE"; + public static final String TRUE = "true"; public static final String FALSE = "false"; diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java index 36b624e..6caf3fe 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java @@ -252,7 +252,7 @@ public void alterPartition() throws Exception { Partition newPart = new Partition(Arrays.asList("today"), "default", "alterparttable", startTime, startTime + 1, sd, emptyParameters); - msClient.alter_partition("default", "alterparttable", newPart); + msClient.alter_partition("default", "alterparttable", newPart, null); NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); assertEquals(3, rsp.getEventsSize()); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 8601df0..9e9753f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -495,7 +495,7 @@ private static Partition makePartitionObject(String dbName, String tblName, part4.setSd(tbl.getSd().deepCopy()); part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy()); part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix); - MetaStoreUtils.updatePartitionStatsFast(part4, warehouse); + MetaStoreUtils.updatePartitionStatsFast(part4, warehouse, null); return part4; } @@ -702,7 +702,7 @@ public void testAlterViewParititon() throws Throwable { part2.getParameters().put("a", "b"); - client.alter_partition(dbName, viewName, part2); + client.alter_partition(dbName, viewName, part2, null); Partition part3 = client.getPartition(dbName, viewName, part.getValues()); assertEquals("couldn't view alter partition", 
part3.getParameters().get( @@ -789,7 +789,7 @@ public void testAlterPartition() throws Throwable { part2.getParameters().put("retention", "10"); part2.getSd().setNumBuckets(12); part2.getSd().getSerdeInfo().getParameters().put("abc", "1"); - client.alter_partition(dbName, tblName, part2); + client.alter_partition(dbName, tblName, part2, null); Partition part3 = client.getPartition(dbName, tblName, part.getValues()); assertEquals("couldn't alter partition", part3.getParameters().get( diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 8a37c11..6413762 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -173,7 +173,7 @@ public void testEnvironmentContext() throws Exception { assertEquals(envContext, appendPartEvent.getEnvironmentContext()); table.setTableName(renamed); - msc.alter_table(dbName, tblName, table, envContext); + msc.alter_table_with_environmentContext(dbName, tblName, table, envContext); listSize++; assertEquals(notifyList.size(), listSize); AlterTableEvent alterTableEvent = (AlterTableEvent) notifyList.get(listSize-1); @@ -181,7 +181,7 @@ public void testEnvironmentContext() throws Exception { assertEquals(envContext, alterTableEvent.getEnvironmentContext()); table.setTableName(tblName); - msc.alter_table(dbName, renamed, table, envContext); + msc.alter_table_with_environmentContext(dbName, renamed, table, envContext); listSize++; assertEquals(notifyList.size(), listSize); diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index 9d8c092..33da870 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -1112,6 +1112,7 @@ service ThriftHiveMetastore extends fb303.FacebookService // prehooks are fired together followed by all post hooks void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts) throws (1:InvalidOperationException o1, 2:MetaException o2) + void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2) void alter_partition_with_environment_context(1:string db_name, 2:string tbl_name, 3:Partition new_part, diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 7c530b2..3076eaa 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -17167,6 +17167,289 @@ uint32_t ThriftHiveMetastore_alter_partitions_presult::read(::apache::thrift::pr } + +ThriftHiveMetastore_alter_partitions_with_environment_context_args::~ThriftHiveMetastore_alter_partitions_with_environment_context_args() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer +=
iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->new_parts.clear(); + uint32_t _size1122; + ::apache::thrift::protocol::TType _etype1125; + xfer += iprot->readListBegin(_etype1125, _size1122); + this->new_parts.resize(_size1122); + uint32_t _i1126; + for (_i1126 = 0; _i1126 < _size1122; ++_i1126) + { + xfer += this->new_parts[_i1126].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.new_parts = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->environment_context.read(iprot); + this->__isset.environment_context = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); + std::vector ::const_iterator _iter1127; + for (_iter1127 = this->new_parts.begin(); _iter1127 != this->new_parts.end(); ++_iter1127) + { + xfer += (*_iter1127).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::~ThriftHiveMetastore_alter_partitions_with_environment_context_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += 
oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); + std::vector ::const_iterator _iter1128; + for (_iter1128 = (*(this->new_parts)).begin(); _iter1128 != (*(this->new_parts)).end(); ++_iter1128) + { + xfer += (*_iter1128).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_partitions_with_environment_context_result::~ThriftHiveMetastore_alter_partitions_with_environment_context_result() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_with_environment_context_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_alter_partitions_with_environment_context_presult::~ThriftHiveMetastore_alter_partitions_with_environment_context_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + ThriftHiveMetastore_alter_partition_with_environment_context_args::~ThriftHiveMetastore_alter_partition_with_environment_context_args() throw() { } @@ -17467,14 +17750,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1122; - ::apache::thrift::protocol::TType _etype1125; - xfer += iprot->readListBegin(_etype1125, _size1122); - this->part_vals.resize(_size1122); - uint32_t _i1126; - for (_i1126 = 0; _i1126 < _size1122; ++_i1126) + uint32_t _size1129; + ::apache::thrift::protocol::TType _etype1132; + xfer += iprot->readListBegin(_etype1132, _size1129); + this->part_vals.resize(_size1129); + uint32_t _i1133; + for (_i1133 = 0; _i1133 < _size1129; ++_i1133) { - xfer += iprot->readString(this->part_vals[_i1126]); + xfer += iprot->readString(this->part_vals[_i1133]); } xfer += iprot->readListEnd(); } @@ -17519,10 +17802,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1127; - for (_iter1127 = this->part_vals.begin(); _iter1127 != this->part_vals.end(); ++_iter1127) + std::vector ::const_iterator _iter1134; + for (_iter1134 = this->part_vals.begin(); _iter1134 != this->part_vals.end(); ++_iter1134) { - xfer += oprot->writeString((*_iter1127)); + xfer += oprot->writeString((*_iter1134)); } xfer += oprot->writeListEnd(); } @@ -17558,10 +17841,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1128; - for (_iter1128 = (*(this->part_vals)).begin(); _iter1128 != (*(this->part_vals)).end(); ++_iter1128) + std::vector ::const_iterator _iter1135; + for (_iter1135 = (*(this->part_vals)).begin(); _iter1135 != (*(this->part_vals)).end(); ++_iter1135) { - xfer += oprot->writeString((*_iter1128)); + xfer += oprot->writeString((*_iter1135)); } xfer += oprot->writeListEnd(); } @@ -17734,14 +18017,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1129; - ::apache::thrift::protocol::TType _etype1132; - xfer += iprot->readListBegin(_etype1132, _size1129); - this->part_vals.resize(_size1129); - uint32_t _i1133; - for (_i1133 = 0; _i1133 < _size1129; ++_i1133) + uint32_t _size1136; + ::apache::thrift::protocol::TType _etype1139; + xfer += iprot->readListBegin(_etype1139, _size1136); + this->part_vals.resize(_size1136); + uint32_t _i1140; + for (_i1140 = 0; _i1140 < _size1136; ++_i1140) { - xfer += iprot->readString(this->part_vals[_i1133]); + xfer += 
iprot->readString(this->part_vals[_i1140]); } xfer += iprot->readListEnd(); } @@ -17778,10 +18061,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1134; - for (_iter1134 = this->part_vals.begin(); _iter1134 != this->part_vals.end(); ++_iter1134) + std::vector ::const_iterator _iter1141; + for (_iter1141 = this->part_vals.begin(); _iter1141 != this->part_vals.end(); ++_iter1141) { - xfer += oprot->writeString((*_iter1134)); + xfer += oprot->writeString((*_iter1141)); } xfer += oprot->writeListEnd(); } @@ -17809,10 +18092,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1135; - for (_iter1135 = (*(this->part_vals)).begin(); _iter1135 != (*(this->part_vals)).end(); ++_iter1135) + std::vector ::const_iterator _iter1142; + for (_iter1142 = (*(this->part_vals)).begin(); _iter1142 != (*(this->part_vals)).end(); ++_iter1142) { - xfer += oprot->writeString((*_iter1135)); + xfer += oprot->writeString((*_iter1142)); } xfer += oprot->writeListEnd(); } @@ -18287,14 +18570,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1136; - ::apache::thrift::protocol::TType _etype1139; - xfer += iprot->readListBegin(_etype1139, _size1136); - this->success.resize(_size1136); - uint32_t _i1140; - for (_i1140 = 0; _i1140 < _size1136; ++_i1140) + uint32_t _size1143; + ::apache::thrift::protocol::TType _etype1146; + xfer += iprot->readListBegin(_etype1146, _size1143); + this->success.resize(_size1143); + uint32_t _i1147; + for (_i1147 = 0; _i1147 < _size1143; ++_i1147) { - xfer += iprot->readString(this->success[_i1140]); + xfer += iprot->readString(this->success[_i1147]); } xfer += iprot->readListEnd(); } @@ -18333,10 +18616,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1141; - for (_iter1141 = this->success.begin(); _iter1141 != this->success.end(); ++_iter1141) + std::vector ::const_iterator _iter1148; + for (_iter1148 = this->success.begin(); _iter1148 != this->success.end(); ++_iter1148) { - xfer += oprot->writeString((*_iter1141)); + xfer += oprot->writeString((*_iter1148)); } xfer += oprot->writeListEnd(); } @@ -18381,14 +18664,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1142; - ::apache::thrift::protocol::TType _etype1145; - xfer += iprot->readListBegin(_etype1145, _size1142); - (*(this->success)).resize(_size1142); - uint32_t _i1146; - for (_i1146 = 0; _i1146 < _size1142; ++_i1146) + uint32_t _size1149; + ::apache::thrift::protocol::TType _etype1152; + xfer += iprot->readListBegin(_etype1152, _size1149); + (*(this->success)).resize(_size1149); + uint32_t 
_i1153; + for (_i1153 = 0; _i1153 < _size1149; ++_i1153) { - xfer += iprot->readString((*(this->success))[_i1146]); + xfer += iprot->readString((*(this->success))[_i1153]); } xfer += iprot->readListEnd(); } @@ -18526,17 +18809,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1147; - ::apache::thrift::protocol::TType _ktype1148; - ::apache::thrift::protocol::TType _vtype1149; - xfer += iprot->readMapBegin(_ktype1148, _vtype1149, _size1147); - uint32_t _i1151; - for (_i1151 = 0; _i1151 < _size1147; ++_i1151) + uint32_t _size1154; + ::apache::thrift::protocol::TType _ktype1155; + ::apache::thrift::protocol::TType _vtype1156; + xfer += iprot->readMapBegin(_ktype1155, _vtype1156, _size1154); + uint32_t _i1158; + for (_i1158 = 0; _i1158 < _size1154; ++_i1158) { - std::string _key1152; - xfer += iprot->readString(_key1152); - std::string& _val1153 = this->success[_key1152]; - xfer += iprot->readString(_val1153); + std::string _key1159; + xfer += iprot->readString(_key1159); + std::string& _val1160 = this->success[_key1159]; + xfer += iprot->readString(_val1160); } xfer += iprot->readMapEnd(); } @@ -18575,11 +18858,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1154; - for (_iter1154 = this->success.begin(); _iter1154 != this->success.end(); ++_iter1154) + std::map ::const_iterator _iter1161; + for (_iter1161 = this->success.begin(); _iter1161 != this->success.end(); ++_iter1161) { - xfer += oprot->writeString(_iter1154->first); - xfer += oprot->writeString(_iter1154->second); + xfer += oprot->writeString(_iter1161->first); + xfer += oprot->writeString(_iter1161->second); } xfer += oprot->writeMapEnd(); } @@ -18624,17 +18907,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1155; - ::apache::thrift::protocol::TType _ktype1156; - ::apache::thrift::protocol::TType _vtype1157; - xfer += iprot->readMapBegin(_ktype1156, _vtype1157, _size1155); - uint32_t _i1159; - for (_i1159 = 0; _i1159 < _size1155; ++_i1159) + uint32_t _size1162; + ::apache::thrift::protocol::TType _ktype1163; + ::apache::thrift::protocol::TType _vtype1164; + xfer += iprot->readMapBegin(_ktype1163, _vtype1164, _size1162); + uint32_t _i1166; + for (_i1166 = 0; _i1166 < _size1162; ++_i1166) { - std::string _key1160; - xfer += iprot->readString(_key1160); - std::string& _val1161 = (*(this->success))[_key1160]; - xfer += iprot->readString(_val1161); + std::string _key1167; + xfer += iprot->readString(_key1167); + std::string& _val1168 = (*(this->success))[_key1167]; + xfer += iprot->readString(_val1168); } xfer += iprot->readMapEnd(); } @@ -18709,17 +18992,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1162; - ::apache::thrift::protocol::TType _ktype1163; - ::apache::thrift::protocol::TType _vtype1164; - xfer += iprot->readMapBegin(_ktype1163, _vtype1164, _size1162); - uint32_t _i1166; - for (_i1166 = 0; _i1166 < _size1162; ++_i1166) + uint32_t _size1169; + 
::apache::thrift::protocol::TType _ktype1170; + ::apache::thrift::protocol::TType _vtype1171; + xfer += iprot->readMapBegin(_ktype1170, _vtype1171, _size1169); + uint32_t _i1173; + for (_i1173 = 0; _i1173 < _size1169; ++_i1173) { - std::string _key1167; - xfer += iprot->readString(_key1167); - std::string& _val1168 = this->part_vals[_key1167]; - xfer += iprot->readString(_val1168); + std::string _key1174; + xfer += iprot->readString(_key1174); + std::string& _val1175 = this->part_vals[_key1174]; + xfer += iprot->readString(_val1175); } xfer += iprot->readMapEnd(); } @@ -18730,9 +19013,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1169; - xfer += iprot->readI32(ecast1169); - this->eventType = (PartitionEventType::type)ecast1169; + int32_t ecast1176; + xfer += iprot->readI32(ecast1176); + this->eventType = (PartitionEventType::type)ecast1176; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -18766,11 +19049,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1170; - for (_iter1170 = this->part_vals.begin(); _iter1170 != this->part_vals.end(); ++_iter1170) + std::map ::const_iterator _iter1177; + for (_iter1177 = this->part_vals.begin(); _iter1177 != this->part_vals.end(); ++_iter1177) { - xfer += oprot->writeString(_iter1170->first); - xfer += oprot->writeString(_iter1170->second); + xfer += oprot->writeString(_iter1177->first); + xfer += oprot->writeString(_iter1177->second); } xfer += oprot->writeMapEnd(); } @@ -18806,11 +19089,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1171; - for (_iter1171 = (*(this->part_vals)).begin(); _iter1171 != (*(this->part_vals)).end(); ++_iter1171) + std::map ::const_iterator _iter1178; + for (_iter1178 = (*(this->part_vals)).begin(); _iter1178 != (*(this->part_vals)).end(); ++_iter1178) { - xfer += oprot->writeString(_iter1171->first); - xfer += oprot->writeString(_iter1171->second); + xfer += oprot->writeString(_iter1178->first); + xfer += oprot->writeString(_iter1178->second); } xfer += oprot->writeMapEnd(); } @@ -19079,17 +19362,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1172; - ::apache::thrift::protocol::TType _ktype1173; - ::apache::thrift::protocol::TType _vtype1174; - xfer += iprot->readMapBegin(_ktype1173, _vtype1174, _size1172); - uint32_t _i1176; - for (_i1176 = 0; _i1176 < _size1172; ++_i1176) + uint32_t _size1179; + ::apache::thrift::protocol::TType _ktype1180; + ::apache::thrift::protocol::TType _vtype1181; + xfer += iprot->readMapBegin(_ktype1180, _vtype1181, _size1179); + uint32_t _i1183; + for (_i1183 = 0; _i1183 < _size1179; ++_i1183) { - std::string _key1177; - xfer += iprot->readString(_key1177); - std::string& _val1178 = this->part_vals[_key1177]; - xfer += 
iprot->readString(_val1178); + std::string _key1184; + xfer += iprot->readString(_key1184); + std::string& _val1185 = this->part_vals[_key1184]; + xfer += iprot->readString(_val1185); } xfer += iprot->readMapEnd(); } @@ -19100,9 +19383,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1179; - xfer += iprot->readI32(ecast1179); - this->eventType = (PartitionEventType::type)ecast1179; + int32_t ecast1186; + xfer += iprot->readI32(ecast1186); + this->eventType = (PartitionEventType::type)ecast1186; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -19136,11 +19419,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1180; - for (_iter1180 = this->part_vals.begin(); _iter1180 != this->part_vals.end(); ++_iter1180) + std::map ::const_iterator _iter1187; + for (_iter1187 = this->part_vals.begin(); _iter1187 != this->part_vals.end(); ++_iter1187) { - xfer += oprot->writeString(_iter1180->first); - xfer += oprot->writeString(_iter1180->second); + xfer += oprot->writeString(_iter1187->first); + xfer += oprot->writeString(_iter1187->second); } xfer += oprot->writeMapEnd(); } @@ -19176,11 +19459,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1181; - for (_iter1181 = (*(this->part_vals)).begin(); _iter1181 != (*(this->part_vals)).end(); ++_iter1181) + std::map ::const_iterator _iter1188; + for (_iter1188 = (*(this->part_vals)).begin(); _iter1188 != (*(this->part_vals)).end(); ++_iter1188) { - xfer += oprot->writeString(_iter1181->first); - xfer += oprot->writeString(_iter1181->second); + xfer += oprot->writeString(_iter1188->first); + xfer += oprot->writeString(_iter1188->second); } xfer += oprot->writeMapEnd(); } @@ -20616,14 +20899,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1182; - ::apache::thrift::protocol::TType _etype1185; - xfer += iprot->readListBegin(_etype1185, _size1182); - this->success.resize(_size1182); - uint32_t _i1186; - for (_i1186 = 0; _i1186 < _size1182; ++_i1186) + uint32_t _size1189; + ::apache::thrift::protocol::TType _etype1192; + xfer += iprot->readListBegin(_etype1192, _size1189); + this->success.resize(_size1189); + uint32_t _i1193; + for (_i1193 = 0; _i1193 < _size1189; ++_i1193) { - xfer += this->success[_i1186].read(iprot); + xfer += this->success[_i1193].read(iprot); } xfer += iprot->readListEnd(); } @@ -20670,10 +20953,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1187; - for (_iter1187 = this->success.begin(); _iter1187 != this->success.end(); 
++_iter1187) + std::vector ::const_iterator _iter1194; + for (_iter1194 = this->success.begin(); _iter1194 != this->success.end(); ++_iter1194) { - xfer += (*_iter1187).write(oprot); + xfer += (*_iter1194).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20722,14 +21005,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1188; - ::apache::thrift::protocol::TType _etype1191; - xfer += iprot->readListBegin(_etype1191, _size1188); - (*(this->success)).resize(_size1188); - uint32_t _i1192; - for (_i1192 = 0; _i1192 < _size1188; ++_i1192) + uint32_t _size1195; + ::apache::thrift::protocol::TType _etype1198; + xfer += iprot->readListBegin(_etype1198, _size1195); + (*(this->success)).resize(_size1195); + uint32_t _i1199; + for (_i1199 = 0; _i1199 < _size1195; ++_i1199) { - xfer += (*(this->success))[_i1192].read(iprot); + xfer += (*(this->success))[_i1199].read(iprot); } xfer += iprot->readListEnd(); } @@ -20907,14 +21190,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1193; - ::apache::thrift::protocol::TType _etype1196; - xfer += iprot->readListBegin(_etype1196, _size1193); - this->success.resize(_size1193); - uint32_t _i1197; - for (_i1197 = 0; _i1197 < _size1193; ++_i1197) + uint32_t _size1200; + ::apache::thrift::protocol::TType _etype1203; + xfer += iprot->readListBegin(_etype1203, _size1200); + this->success.resize(_size1200); + uint32_t _i1204; + for (_i1204 = 0; _i1204 < _size1200; ++_i1204) { - xfer += iprot->readString(this->success[_i1197]); + xfer += iprot->readString(this->success[_i1204]); } xfer += iprot->readListEnd(); } @@ -20953,10 +21236,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1198; - for (_iter1198 = this->success.begin(); _iter1198 != this->success.end(); ++_iter1198) + std::vector ::const_iterator _iter1205; + for (_iter1205 = this->success.begin(); _iter1205 != this->success.end(); ++_iter1205) { - xfer += oprot->writeString((*_iter1198)); + xfer += oprot->writeString((*_iter1205)); } xfer += oprot->writeListEnd(); } @@ -21001,14 +21284,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1199; - ::apache::thrift::protocol::TType _etype1202; - xfer += iprot->readListBegin(_etype1202, _size1199); - (*(this->success)).resize(_size1199); - uint32_t _i1203; - for (_i1203 = 0; _i1203 < _size1199; ++_i1203) + uint32_t _size1206; + ::apache::thrift::protocol::TType _etype1209; + xfer += iprot->readListBegin(_etype1209, _size1206); + (*(this->success)).resize(_size1206); + uint32_t _i1210; + for (_i1210 = 0; _i1210 < _size1206; ++_i1210) { - xfer += iprot->readString((*(this->success))[_i1203]); + xfer += iprot->readString((*(this->success))[_i1210]); } xfer += iprot->readListEnd(); } @@ -24581,14 +24864,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1204; - ::apache::thrift::protocol::TType 
_etype1207; - xfer += iprot->readListBegin(_etype1207, _size1204); - this->success.resize(_size1204); - uint32_t _i1208; - for (_i1208 = 0; _i1208 < _size1204; ++_i1208) + uint32_t _size1211; + ::apache::thrift::protocol::TType _etype1214; + xfer += iprot->readListBegin(_etype1214, _size1211); + this->success.resize(_size1211); + uint32_t _i1215; + for (_i1215 = 0; _i1215 < _size1211; ++_i1215) { - xfer += iprot->readString(this->success[_i1208]); + xfer += iprot->readString(this->success[_i1215]); } xfer += iprot->readListEnd(); } @@ -24627,10 +24910,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1209; - for (_iter1209 = this->success.begin(); _iter1209 != this->success.end(); ++_iter1209) + std::vector ::const_iterator _iter1216; + for (_iter1216 = this->success.begin(); _iter1216 != this->success.end(); ++_iter1216) { - xfer += oprot->writeString((*_iter1209)); + xfer += oprot->writeString((*_iter1216)); } xfer += oprot->writeListEnd(); } @@ -24675,14 +24958,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1210; - ::apache::thrift::protocol::TType _etype1213; - xfer += iprot->readListBegin(_etype1213, _size1210); - (*(this->success)).resize(_size1210); - uint32_t _i1214; - for (_i1214 = 0; _i1214 < _size1210; ++_i1214) + uint32_t _size1217; + ::apache::thrift::protocol::TType _etype1220; + xfer += iprot->readListBegin(_etype1220, _size1217); + (*(this->success)).resize(_size1217); + uint32_t _i1221; + for (_i1221 = 0; _i1221 < _size1217; ++_i1221) { - xfer += iprot->readString((*(this->success))[_i1214]); + xfer += iprot->readString((*(this->success))[_i1221]); } xfer += iprot->readListEnd(); } @@ -25642,14 +25925,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1215; - ::apache::thrift::protocol::TType _etype1218; - xfer += iprot->readListBegin(_etype1218, _size1215); - this->success.resize(_size1215); - uint32_t _i1219; - for (_i1219 = 0; _i1219 < _size1215; ++_i1219) + uint32_t _size1222; + ::apache::thrift::protocol::TType _etype1225; + xfer += iprot->readListBegin(_etype1225, _size1222); + this->success.resize(_size1222); + uint32_t _i1226; + for (_i1226 = 0; _i1226 < _size1222; ++_i1226) { - xfer += iprot->readString(this->success[_i1219]); + xfer += iprot->readString(this->success[_i1226]); } xfer += iprot->readListEnd(); } @@ -25688,10 +25971,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1220; - for (_iter1220 = this->success.begin(); _iter1220 != this->success.end(); ++_iter1220) + std::vector ::const_iterator _iter1227; + for (_iter1227 = this->success.begin(); _iter1227 != this->success.end(); ++_iter1227) { - xfer += oprot->writeString((*_iter1220)); + xfer += oprot->writeString((*_iter1227)); } xfer += oprot->writeListEnd(); } @@ -25736,14 +26019,14 @@ uint32_t 
ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1221; - ::apache::thrift::protocol::TType _etype1224; - xfer += iprot->readListBegin(_etype1224, _size1221); - (*(this->success)).resize(_size1221); - uint32_t _i1225; - for (_i1225 = 0; _i1225 < _size1221; ++_i1225) + uint32_t _size1228; + ::apache::thrift::protocol::TType _etype1231; + xfer += iprot->readListBegin(_etype1231, _size1228); + (*(this->success)).resize(_size1228); + uint32_t _i1232; + for (_i1232 = 0; _i1232 < _size1228; ++_i1232) { - xfer += iprot->readString((*(this->success))[_i1225]); + xfer += iprot->readString((*(this->success))[_i1232]); } xfer += iprot->readListEnd(); } @@ -25816,9 +26099,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1226; - xfer += iprot->readI32(ecast1226); - this->principal_type = (PrincipalType::type)ecast1226; + int32_t ecast1233; + xfer += iprot->readI32(ecast1233); + this->principal_type = (PrincipalType::type)ecast1233; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -25834,9 +26117,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1227; - xfer += iprot->readI32(ecast1227); - this->grantorType = (PrincipalType::type)ecast1227; + int32_t ecast1234; + xfer += iprot->readI32(ecast1234); + this->grantorType = (PrincipalType::type)ecast1234; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -26107,9 +26390,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1228; - xfer += iprot->readI32(ecast1228); - this->principal_type = (PrincipalType::type)ecast1228; + int32_t ecast1235; + xfer += iprot->readI32(ecast1235); + this->principal_type = (PrincipalType::type)ecast1235; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -26340,9 +26623,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1229; - xfer += iprot->readI32(ecast1229); - this->principal_type = (PrincipalType::type)ecast1229; + int32_t ecast1236; + xfer += iprot->readI32(ecast1236); + this->principal_type = (PrincipalType::type)ecast1236; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -26431,14 +26714,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1230; - ::apache::thrift::protocol::TType _etype1233; - xfer += iprot->readListBegin(_etype1233, _size1230); - this->success.resize(_size1230); - uint32_t _i1234; - for (_i1234 = 0; _i1234 < _size1230; ++_i1234) + uint32_t _size1237; + ::apache::thrift::protocol::TType _etype1240; + xfer += iprot->readListBegin(_etype1240, _size1237); + this->success.resize(_size1237); + uint32_t _i1241; + for (_i1241 = 0; _i1241 < _size1237; ++_i1241) { - xfer += this->success[_i1234].read(iprot); + xfer += this->success[_i1241].read(iprot); } xfer += iprot->readListEnd(); } @@ -26477,10 +26760,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1235; - for (_iter1235 = this->success.begin(); _iter1235 != this->success.end(); ++_iter1235) + std::vector ::const_iterator _iter1242; + for (_iter1242 = this->success.begin(); _iter1242 != this->success.end(); ++_iter1242) { - xfer += (*_iter1235).write(oprot); + xfer += (*_iter1242).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26525,14 +26808,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1236; - ::apache::thrift::protocol::TType _etype1239; - xfer += iprot->readListBegin(_etype1239, _size1236); - (*(this->success)).resize(_size1236); - uint32_t _i1240; - for (_i1240 = 0; _i1240 < _size1236; ++_i1240) + uint32_t _size1243; + ::apache::thrift::protocol::TType _etype1246; + xfer += iprot->readListBegin(_etype1246, _size1243); + (*(this->success)).resize(_size1243); + uint32_t _i1247; + for (_i1247 = 0; _i1247 < _size1243; ++_i1247) { - xfer += (*(this->success))[_i1240].read(iprot); + xfer += (*(this->success))[_i1247].read(iprot); } xfer += iprot->readListEnd(); } @@ -27228,14 +27511,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1241; - ::apache::thrift::protocol::TType _etype1244; - xfer += iprot->readListBegin(_etype1244, _size1241); - this->group_names.resize(_size1241); - uint32_t _i1245; - for (_i1245 = 0; _i1245 < _size1241; ++_i1245) + uint32_t _size1248; + ::apache::thrift::protocol::TType _etype1251; + xfer += iprot->readListBegin(_etype1251, _size1248); + this->group_names.resize(_size1248); + uint32_t _i1252; + for (_i1252 = 0; _i1252 < _size1248; ++_i1252) { - xfer += iprot->readString(this->group_names[_i1245]); + xfer += iprot->readString(this->group_names[_i1252]); } xfer += iprot->readListEnd(); } @@ -27272,10 +27555,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1246; - for (_iter1246 = this->group_names.begin(); _iter1246 != this->group_names.end(); ++_iter1246) + std::vector ::const_iterator _iter1253; + for (_iter1253 = this->group_names.begin(); _iter1253 != this->group_names.end(); ++_iter1253) { - xfer += oprot->writeString((*_iter1246)); + xfer += oprot->writeString((*_iter1253)); } xfer += oprot->writeListEnd(); } @@ -27307,10 +27590,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1247; - for (_iter1247 = (*(this->group_names)).begin(); _iter1247 != (*(this->group_names)).end(); ++_iter1247) + std::vector ::const_iterator _iter1254; + for (_iter1254 = (*(this->group_names)).begin(); _iter1254 != (*(this->group_names)).end(); ++_iter1254) { - xfer += oprot->writeString((*_iter1247)); + xfer += oprot->writeString((*_iter1254)); } xfer += 
oprot->writeListEnd(); } @@ -27485,9 +27768,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1248; - xfer += iprot->readI32(ecast1248); - this->principal_type = (PrincipalType::type)ecast1248; + int32_t ecast1255; + xfer += iprot->readI32(ecast1255); + this->principal_type = (PrincipalType::type)ecast1255; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -27592,14 +27875,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1249; - ::apache::thrift::protocol::TType _etype1252; - xfer += iprot->readListBegin(_etype1252, _size1249); - this->success.resize(_size1249); - uint32_t _i1253; - for (_i1253 = 0; _i1253 < _size1249; ++_i1253) + uint32_t _size1256; + ::apache::thrift::protocol::TType _etype1259; + xfer += iprot->readListBegin(_etype1259, _size1256); + this->success.resize(_size1256); + uint32_t _i1260; + for (_i1260 = 0; _i1260 < _size1256; ++_i1260) { - xfer += this->success[_i1253].read(iprot); + xfer += this->success[_i1260].read(iprot); } xfer += iprot->readListEnd(); } @@ -27638,10 +27921,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1254; - for (_iter1254 = this->success.begin(); _iter1254 != this->success.end(); ++_iter1254) + std::vector ::const_iterator _iter1261; + for (_iter1261 = this->success.begin(); _iter1261 != this->success.end(); ++_iter1261) { - xfer += (*_iter1254).write(oprot); + xfer += (*_iter1261).write(oprot); } xfer += oprot->writeListEnd(); } @@ -27686,14 +27969,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1255; - ::apache::thrift::protocol::TType _etype1258; - xfer += iprot->readListBegin(_etype1258, _size1255); - (*(this->success)).resize(_size1255); - uint32_t _i1259; - for (_i1259 = 0; _i1259 < _size1255; ++_i1259) + uint32_t _size1262; + ::apache::thrift::protocol::TType _etype1265; + xfer += iprot->readListBegin(_etype1265, _size1262); + (*(this->success)).resize(_size1262); + uint32_t _i1266; + for (_i1266 = 0; _i1266 < _size1262; ++_i1266) { - xfer += (*(this->success))[_i1259].read(iprot); + xfer += (*(this->success))[_i1266].read(iprot); } xfer += iprot->readListEnd(); } @@ -28381,14 +28664,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1260; - ::apache::thrift::protocol::TType _etype1263; - xfer += iprot->readListBegin(_etype1263, _size1260); - this->group_names.resize(_size1260); - uint32_t _i1264; - for (_i1264 = 0; _i1264 < _size1260; ++_i1264) + uint32_t _size1267; + ::apache::thrift::protocol::TType _etype1270; + xfer += iprot->readListBegin(_etype1270, _size1267); + this->group_names.resize(_size1267); + uint32_t _i1271; + for (_i1271 = 0; _i1271 < _size1267; ++_i1271) { - xfer += iprot->readString(this->group_names[_i1264]); + xfer += iprot->readString(this->group_names[_i1271]); } xfer += iprot->readListEnd(); } @@ -28421,10 +28704,10 @@ 
uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1265; - for (_iter1265 = this->group_names.begin(); _iter1265 != this->group_names.end(); ++_iter1265) + std::vector ::const_iterator _iter1272; + for (_iter1272 = this->group_names.begin(); _iter1272 != this->group_names.end(); ++_iter1272) { - xfer += oprot->writeString((*_iter1265)); + xfer += oprot->writeString((*_iter1272)); } xfer += oprot->writeListEnd(); } @@ -28452,10 +28735,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1266; - for (_iter1266 = (*(this->group_names)).begin(); _iter1266 != (*(this->group_names)).end(); ++_iter1266) + std::vector ::const_iterator _iter1273; + for (_iter1273 = (*(this->group_names)).begin(); _iter1273 != (*(this->group_names)).end(); ++_iter1273) { - xfer += oprot->writeString((*_iter1266)); + xfer += oprot->writeString((*_iter1273)); } xfer += oprot->writeListEnd(); } @@ -28496,14 +28779,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1267; - ::apache::thrift::protocol::TType _etype1270; - xfer += iprot->readListBegin(_etype1270, _size1267); - this->success.resize(_size1267); - uint32_t _i1271; - for (_i1271 = 0; _i1271 < _size1267; ++_i1271) + uint32_t _size1274; + ::apache::thrift::protocol::TType _etype1277; + xfer += iprot->readListBegin(_etype1277, _size1274); + this->success.resize(_size1274); + uint32_t _i1278; + for (_i1278 = 0; _i1278 < _size1274; ++_i1278) { - xfer += iprot->readString(this->success[_i1271]); + xfer += iprot->readString(this->success[_i1278]); } xfer += iprot->readListEnd(); } @@ -28542,10 +28825,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1272; - for (_iter1272 = this->success.begin(); _iter1272 != this->success.end(); ++_iter1272) + std::vector ::const_iterator _iter1279; + for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) { - xfer += oprot->writeString((*_iter1272)); + xfer += oprot->writeString((*_iter1279)); } xfer += oprot->writeListEnd(); } @@ -28590,14 +28873,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1273; - ::apache::thrift::protocol::TType _etype1276; - xfer += iprot->readListBegin(_etype1276, _size1273); - (*(this->success)).resize(_size1273); - uint32_t _i1277; - for (_i1277 = 0; _i1277 < _size1273; ++_i1277) + uint32_t _size1280; + ::apache::thrift::protocol::TType _etype1283; + xfer += iprot->readListBegin(_etype1283, _size1280); + (*(this->success)).resize(_size1280); + uint32_t _i1284; + for (_i1284 = 0; _i1284 < _size1280; ++_i1284) { - xfer += 
iprot->readString((*(this->success))[_i1277]); + xfer += iprot->readString((*(this->success))[_i1284]); } xfer += iprot->readListEnd(); } @@ -37658,6 +37941,68 @@ void ThriftHiveMetastoreClient::recv_alter_partitions() return; } +void ThriftHiveMetastoreClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) +{ + send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + recv_alter_partitions_with_environment_context(); +} + +void ThriftHiveMetastoreClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.new_parts = &new_parts; + args.environment_context = &environment_context; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_alter_partitions_with_environment_context() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_partitions_with_environment_context") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + return; +} + void ThriftHiveMetastoreClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); @@ -45852,6 +46197,65 @@ void ThriftHiveMetastoreProcessor::process_alter_partitions(int32_t seqid, ::apa } } +void ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.alter_partitions_with_environment_context", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + } + ThriftHiveMetastore_alter_partitions_with_environment_context_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context", bytes); + } + + ThriftHiveMetastore_alter_partitions_with_environment_context_result result; + try { + iface_->alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context); + } catch (InvalidOperationException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context"); + } + + oprot->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.alter_partitions_with_environment_context", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -55811,6 +56215,95 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) +{ + int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + recv_alter_partitions_with_environment_context(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.new_parts = &new_parts; + args.environment_context = &environment_context; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; +
::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("alter_partitions_with_environment_context") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 01b332e..95966e8 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -84,6 +84,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) = 0; virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0; virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts) = 0; + virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) = 0; virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0; virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part) = 0; virtual bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception) = 0; @@ -379,6 +380,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , 
virtual p void alter_partitions(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* new_parts */) { return; } + void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector & /* new_parts */, const EnvironmentContext& /* environment_context */) { + return; + } void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) { return; } @@ -8818,6 +8822,139 @@ class ThriftHiveMetastore_alter_partitions_presult { }; +typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset { + _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_parts(false), environment_context(false) {} + bool db_name :1; + bool tbl_name :1; + bool new_parts :1; + bool environment_context :1; +} _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset; + +class ThriftHiveMetastore_alter_partitions_with_environment_context_args { + public: + + ThriftHiveMetastore_alter_partitions_with_environment_context_args(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&); + ThriftHiveMetastore_alter_partitions_with_environment_context_args& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&); + ThriftHiveMetastore_alter_partitions_with_environment_context_args() : db_name(), tbl_name() { + } + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_args() throw(); + std::string db_name; + std::string tbl_name; + std::vector new_parts; + EnvironmentContext environment_context; + + _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset __isset; + + void __set_db_name(const std::string& val); + + void __set_tbl_name(const std::string& val); + + void __set_new_parts(const std::vector & val); + + void __set_environment_context(const EnvironmentContext& val); + + bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_args & rhs) const + { + if (!(db_name == rhs.db_name)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + if (!(new_parts == rhs.new_parts)) + return false; + if (!(environment_context == rhs.environment_context)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_partitions_with_environment_context_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_partitions_with_environment_context_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_alter_partitions_with_environment_context_pargs { + public: + + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_pargs() throw(); + const std::string* db_name; + const std::string* tbl_name; + const std::vector * new_parts; + const EnvironmentContext* environment_context; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset { + _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset() : o1(false), o2(false) {} + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset; + 
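The result struct declared next carries the two exceptions the Thrift IDL declares for this call (InvalidOperationException as field o1, MetaException as field o2) back to the caller; the generated recv_ methods rethrow whichever field is set. A minimal caller-side sketch in Java, reusing the hypothetical client, parts, and context names from the end-to-end example further below:

    try {
      client.alter_partitions_with_environment_context("default", "alterparttable", parts, context);
    } catch (InvalidOperationException e) {
      // rejected alter, surfaced from field o1 of the generated result struct
    } catch (MetaException e) {
      // metastore-side failure, surfaced from field o2
    } catch (org.apache.thrift.TException e) {
      // transport or protocol failure; no result struct was read
    }
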
+class ThriftHiveMetastore_alter_partitions_with_environment_context_result { + public: + + ThriftHiveMetastore_alter_partitions_with_environment_context_result(const ThriftHiveMetastore_alter_partitions_with_environment_context_result&); + ThriftHiveMetastore_alter_partitions_with_environment_context_result& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_result&); + ThriftHiveMetastore_alter_partitions_with_environment_context_result() { + } + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_result() throw(); + InvalidOperationException o1; + MetaException o2; + + _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset __isset; + + void __set_o1(const InvalidOperationException& val); + + void __set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_alter_partitions_with_environment_context_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_alter_partitions_with_environment_context_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset { + _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset() : o1(false), o2(false) {} + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset; + +class ThriftHiveMetastore_alter_partitions_with_environment_context_presult { + public: + + + virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_presult() throw(); + InvalidOperationException o1; + MetaException o2; + + _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_alter_partition_with_environment_context_args__isset { _ThriftHiveMetastore_alter_partition_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_part(false), environment_context(false) {} bool db_name :1; @@ -17467,6 +17604,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); void recv_alter_partitions(); + void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); + void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context); + void recv_alter_partitions_with_environment_context(); void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); 
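Every one of these client entry points just threads an EnvironmentContext through to the server; the context itself is nothing more than a map of string properties. A sketch of building one on the Java side, assuming the StatsSetupConst constants introduced earlier in this patch and the generated EnvironmentContext type; here a stats task marks the alter as its own so that AlterHandler can treat the supplied stats accordingly:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    // STATS_GENERATED=TASK signals that this alter originates from a stats task
    // rather than from a user-issued ALTER statement.
    Map<String, String> props = new HashMap<String, String>();
    props.put(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
    EnvironmentContext context = new EnvironmentContext(props);
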
void recv_alter_partition_with_environment_context(); @@ -17752,6 +17892,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_partitions_by_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_alter_partitions_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_alter_partition_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_rename_partition(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_partition_name_has_valid_characters(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -17889,6 +18030,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["get_partitions_by_names"] = &ThriftHiveMetastoreProcessor::process_get_partitions_by_names; processMap_["alter_partition"] = &ThriftHiveMetastoreProcessor::process_alter_partition; processMap_["alter_partitions"] = &ThriftHiveMetastoreProcessor::process_alter_partitions; + processMap_["alter_partitions_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_partitions_with_environment_context; processMap_["alter_partition_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_alter_partition_with_environment_context; processMap_["rename_partition"] = &ThriftHiveMetastoreProcessor::process_rename_partition; processMap_["partition_name_has_valid_characters"] = &ThriftHiveMetastoreProcessor::process_partition_name_has_valid_characters; @@ -18591,6 +18733,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->alter_partitions(db_name, tbl_name, new_parts); } + void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + } + ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + } + void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { size_t sz = ifaces_.size(); size_t i = 0; @@ -19473,6 +19624,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); int32_t send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts); void recv_alter_partitions(const int32_t seqid); + void alter_partitions_with_environment_context(const std::string& 
db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context); + int32_t send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context); + void recv_alter_partitions_with_environment_context(const int32_t seqid); void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); int32_t send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context); void recv_alter_partition_with_environment_context(const int32_t seqid); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 0fee77b..8cd3974 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -332,6 +332,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("alter_partitions\n"); } + void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) { + // Your implementation goes here + printf("alter_partitions_with_environment_context\n"); + } + void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { // Your implementation goes here printf("alter_partition_with_environment_context\n"); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 7ae18a1..404909b 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -166,6 +166,8 @@ public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException; public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException; @@ -436,6 +438,8 @@ public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + 
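To make the new Java surface concrete, here is a minimal end-to-end sketch; it is illustrative only and not part of the patch. The metastore endpoint, table name, and variable names are assumptions; the call shape follows the declarations above, and, as the comment in hive_metastore.thrift notes for the bulk alter, all prehooks fire together followed by all post hooks:

    import java.util.List;
    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class AlterPartitionsExample {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083); // assumed metastore endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        try {
          List<Partition> parts = client.get_partitions("default", "alterparttable", (short) -1);
          for (Partition p : parts) {
            p.getParameters().put("retention", "10"); // example user edit
          }
          EnvironmentContext context = new EnvironmentContext();
          context.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
          // One round trip alters every partition in the list.
          client.alter_partitions_with_environment_context("default", "alterparttable", parts, context);
        } finally {
          transport.close();
        }
      }
    }
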
public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -2516,6 +2520,35 @@ public void recv_alter_partitions() throws InvalidOperationException, MetaExcept return; } + public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + recv_alter_partitions_with_environment_context(); + } + + public void send_alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws org.apache.thrift.TException + { + alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args(); + args.setDb_name(db_name); + args.setTbl_name(tbl_name); + args.setNew_parts(new_parts); + args.setEnvironment_context(environment_context); + sendBase("alter_partitions_with_environment_context", args); + } + + public void recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException + { + alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + receiveBase(result, "alter_partitions_with_environment_context"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + return; + } + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException { send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); @@ -6807,6 +6840,47 @@ public void getResult() throws InvalidOperationException, MetaException, org.apa } } + public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(db_name, tbl_name, new_parts, environment_context, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class alter_partitions_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall { + private String db_name; + private String tbl_name; + private List<Partition> new_parts; + private EnvironmentContext environment_context; + public alter_partitions_with_environment_context_call(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, 
protocolFactory, transport, resultHandler, false); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.new_parts = new_parts; + this.environment_context = environment_context; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context", org.apache.thrift.protocol.TMessageType.CALL, 0)); + alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args(); + args.setDb_name(db_name); + args.setTbl_name(tbl_name); + args.setNew_parts(new_parts); + args.setEnvironment_context(environment_context); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_alter_partitions_with_environment_context(); + } + } + public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); alter_partition_with_environment_context_call method_call = new alter_partition_with_environment_context_call(db_name, tbl_name, new_part, environment_context, resultHandler, this, ___protocolFactory, ___transport); @@ -9304,6 +9378,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public alter_partitions_with_environment_context() { + super("alter_partitions_with_environment_context"); + } + + public alter_partitions_with_environment_context_args getEmptyArgsInstance() { + return new alter_partitions_with_environment_context_args(); + } + + protected boolean isOneway() { + return false; + } + + public alter_partitions_with_environment_context_result getResult(I iface, alter_partitions_with_environment_context_args args) throws org.apache.thrift.TException { + alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + try { + iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context); + } catch (InvalidOperationException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + public static class alter_partition_with_environment_context extends org.apache.thrift.ProcessFunction { public alter_partition_with_environment_context() { super("alter_partition_with_environment_context"); @@ -12875,6 +12976,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public alter_partitions_with_environment_context() { + super("alter_partitions_with_environment_context"); + } + + public alter_partitions_with_environment_context_args getEmptyArgsInstance() { + return new alter_partitions_with_environment_context_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new 
AsyncMethodCallback() { + public void onComplete(Void o) { + alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result(); + if (e instanceof InvalidOperationException) { + result.o1 = (InvalidOperationException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler); + } + } + public static class alter_partition_with_environment_context extends org.apache.thrift.AsyncProcessFunction { public alter_partition_with_environment_context() { super("alter_partition_with_environment_context"); @@ -92506,30 +92669,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_res } - public static class alter_partition_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partition_with_environment_context_args"); + public static class alter_partitions_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partitions_with_environment_context_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField NEW_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("new_parts", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); private 
static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_partition_with_environment_context_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_partition_with_environment_context_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_partitions_with_environment_context_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_partitions_with_environment_context_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required - private Partition new_part; // required + private List new_parts; // required private EnvironmentContext environment_context; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - NEW_PART((short)3, "new_part"), + NEW_PARTS((short)3, "new_parts"), ENVIRONMENT_CONTEXT((short)4, "environment_context"); private static final Map byName = new HashMap(); @@ -92549,8 +92712,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // NEW_PART - return NEW_PART; + case 3: // NEW_PARTS + return NEW_PARTS; case 4: // ENVIRONMENT_CONTEXT return ENVIRONMENT_CONTEXT; default: @@ -92600,57 +92763,62 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("new_part", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); + tmpMap.put(_Fields.NEW_PARTS, new org.apache.thrift.meta_data.FieldMetaData("new_parts", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)))); tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partition_with_environment_context_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partitions_with_environment_context_args.class, metaDataMap); } - public alter_partition_with_environment_context_args() { + public alter_partitions_with_environment_context_args() { } - public alter_partition_with_environment_context_args( + public alter_partitions_with_environment_context_args( String db_name, String tbl_name, - Partition new_part, + List new_parts, EnvironmentContext environment_context) { this(); this.db_name = db_name; this.tbl_name = tbl_name; - this.new_part = new_part; + this.new_parts = new_parts; this.environment_context = environment_context; } /** * Performs a deep copy on other. 
*/ - public alter_partition_with_environment_context_args(alter_partition_with_environment_context_args other) { + public alter_partitions_with_environment_context_args(alter_partitions_with_environment_context_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } - if (other.isSetNew_part()) { - this.new_part = new Partition(other.new_part); + if (other.isSetNew_parts()) { + List __this__new_parts = new ArrayList(other.new_parts.size()); + for (Partition other_element : other.new_parts) { + __this__new_parts.add(new Partition(other_element)); + } + this.new_parts = __this__new_parts; } if (other.isSetEnvironment_context()) { this.environment_context = new EnvironmentContext(other.environment_context); } } - public alter_partition_with_environment_context_args deepCopy() { - return new alter_partition_with_environment_context_args(this); + public alter_partitions_with_environment_context_args deepCopy() { + return new alter_partitions_with_environment_context_args(this); } @Override public void clear() { this.db_name = null; this.tbl_name = null; - this.new_part = null; + this.new_parts = null; this.environment_context = null; } @@ -92700,26 +92868,41 @@ public void setTbl_nameIsSet(boolean value) { } } - public Partition getNew_part() { - return this.new_part; + public int getNew_partsSize() { + return (this.new_parts == null) ? 0 : this.new_parts.size(); } - public void setNew_part(Partition new_part) { - this.new_part = new_part; + public java.util.Iterator getNew_partsIterator() { + return (this.new_parts == null) ? null : this.new_parts.iterator(); } - public void unsetNew_part() { - this.new_part = null; + public void addToNew_parts(Partition elem) { + if (this.new_parts == null) { + this.new_parts = new ArrayList(); + } + this.new_parts.add(elem); } - /** Returns true if field new_part is set (has been assigned a value) and false otherwise */ - public boolean isSetNew_part() { - return this.new_part != null; + public List getNew_parts() { + return this.new_parts; } - public void setNew_partIsSet(boolean value) { + public void setNew_parts(List new_parts) { + this.new_parts = new_parts; + } + + public void unsetNew_parts() { + this.new_parts = null; + } + + /** Returns true if field new_parts is set (has been assigned a value) and false otherwise */ + public boolean isSetNew_parts() { + return this.new_parts != null; + } + + public void setNew_partsIsSet(boolean value) { if (!value) { - this.new_part = null; + this.new_parts = null; } } @@ -92764,11 +92947,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case NEW_PART: + case NEW_PARTS: if (value == null) { - unsetNew_part(); + unsetNew_parts(); } else { - setNew_part((Partition)value); + setNew_parts((List)value); } break; @@ -92791,8 +92974,8 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); - case NEW_PART: - return getNew_part(); + case NEW_PARTS: + return getNew_parts(); case ENVIRONMENT_CONTEXT: return getEnvironment_context(); @@ -92812,8 +92995,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); - case NEW_PART: - return isSetNew_part(); + case NEW_PARTS: + return isSetNew_parts(); case ENVIRONMENT_CONTEXT: return isSetEnvironment_context(); } @@ -92824,12 +93007,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof 
alter_partition_with_environment_context_args) - return this.equals((alter_partition_with_environment_context_args)that); + if (that instanceof alter_partitions_with_environment_context_args) + return this.equals((alter_partitions_with_environment_context_args)that); return false; } - public boolean equals(alter_partition_with_environment_context_args that) { + public boolean equals(alter_partitions_with_environment_context_args that) { if (that == null) return false; @@ -92851,12 +93034,12 @@ public boolean equals(alter_partition_with_environment_context_args that) { return false; } - boolean this_present_new_part = true && this.isSetNew_part(); - boolean that_present_new_part = true && that.isSetNew_part(); - if (this_present_new_part || that_present_new_part) { - if (!(this_present_new_part && that_present_new_part)) + boolean this_present_new_parts = true && this.isSetNew_parts(); + boolean that_present_new_parts = true && that.isSetNew_parts(); + if (this_present_new_parts || that_present_new_parts) { + if (!(this_present_new_parts && that_present_new_parts)) return false; - if (!this.new_part.equals(that.new_part)) + if (!this.new_parts.equals(that.new_parts)) return false; } @@ -92886,10 +93069,10 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); - boolean present_new_part = true && (isSetNew_part()); - list.add(present_new_part); - if (present_new_part) - list.add(new_part); + boolean present_new_parts = true && (isSetNew_parts()); + list.add(present_new_parts); + if (present_new_parts) + list.add(new_parts); boolean present_environment_context = true && (isSetEnvironment_context()); list.add(present_environment_context); @@ -92900,7 +93083,7 @@ public int hashCode() { } @Override - public int compareTo(alter_partition_with_environment_context_args other) { + public int compareTo(alter_partitions_with_environment_context_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -92927,12 +93110,12 @@ public int compareTo(alter_partition_with_environment_context_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetNew_part()).compareTo(other.isSetNew_part()); + lastComparison = Boolean.valueOf(isSetNew_parts()).compareTo(other.isSetNew_parts()); if (lastComparison != 0) { return lastComparison; } - if (isSetNew_part()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_part, other.new_part); + if (isSetNew_parts()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_parts, other.new_parts); if (lastComparison != 0) { return lastComparison; } @@ -92964,7 +93147,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_partition_with_environment_context_args("); + StringBuilder sb = new StringBuilder("alter_partitions_with_environment_context_args("); boolean first = true; sb.append("db_name:"); @@ -92983,11 +93166,11 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("new_part:"); - if (this.new_part == null) { + sb.append("new_parts:"); + if (this.new_parts == null) { sb.append("null"); } else { - sb.append(this.new_part); + sb.append(this.new_parts); } first = false; if (!first) sb.append(", "); @@ -93005,9 +93188,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (new_part != null) { - new_part.validate(); - } if (environment_context != null) { environment_context.validate(); } @@ -93029,15 +93209,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_partition_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { - public alter_partition_with_environment_context_argsStandardScheme getScheme() { - return new alter_partition_with_environment_context_argsStandardScheme(); + private static class alter_partitions_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_argsStandardScheme getScheme() { + return new alter_partitions_with_environment_context_argsStandardScheme(); } } - private static class alter_partition_with_environment_context_argsStandardScheme extends StandardScheme { + private static class alter_partitions_with_environment_context_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -93063,11 +93243,21 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_wit org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // NEW_PART - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.new_part = new Partition(); - struct.new_part.read(iprot); - struct.setNew_partIsSet(true); + case 3: // NEW_PARTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list942 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list942.size); + Partition _elem943; + for (int _i944 = 0; _i944 < _list942.size; ++_i944) + { + _elem943 = new Partition(); + _elem943.read(iprot); + struct.new_parts.add(_elem943); + } + iprot.readListEnd(); + } + struct.setNew_partsIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -93090,7 +93280,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_wit struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException { 
struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -93104,9 +93294,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_wi oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } - if (struct.new_part != null) { - oprot.writeFieldBegin(NEW_PART_FIELD_DESC); - struct.new_part.write(oprot); + if (struct.new_parts != null) { + oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); + for (Partition _iter945 : struct.new_parts) + { + _iter945.write(oprot); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } if (struct.environment_context != null) { @@ -93120,16 +93317,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_wi } - private static class alter_partition_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { - public alter_partition_with_environment_context_argsTupleScheme getScheme() { - return new alter_partition_with_environment_context_argsTupleScheme(); + private static class alter_partitions_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_argsTupleScheme getScheme() { + return new alter_partitions_with_environment_context_argsTupleScheme(); } } - private static class alter_partition_with_environment_context_argsTupleScheme extends TupleScheme { + private static class alter_partitions_with_environment_context_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -93138,7 +93335,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_wit if (struct.isSetTbl_name()) { optionals.set(1); } - if (struct.isSetNew_part()) { + if (struct.isSetNew_parts()) { optionals.set(2); } if (struct.isSetEnvironment_context()) { @@ -93151,8 +93348,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_wit if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } - if (struct.isSetNew_part()) { - struct.new_part.write(oprot); + if (struct.isSetNew_parts()) { + { + oprot.writeI32(struct.new_parts.size()); + for (Partition _iter946 : struct.new_parts) + { + _iter946.write(oprot); + } + } } if (struct.isSetEnvironment_context()) { struct.environment_context.write(oprot); @@ -93160,7 +93363,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_wit } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { @@ -93172,9 +93375,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partition_with struct.setTbl_nameIsSet(true); } if (incoming.get(2)) { - struct.new_part = new Partition(); - struct.new_part.read(iprot); - 
struct.setNew_partIsSet(true); + { + org.apache.thrift.protocol.TList _list947 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list947.size); + Partition _elem948; + for (int _i949 = 0; _i949 < _list947.size; ++_i949) + { + _elem948 = new Partition(); + _elem948.read(iprot); + struct.new_parts.add(_elem948); + } + } + struct.setNew_partsIsSet(true); } if (incoming.get(3)) { struct.environment_context = new EnvironmentContext(); @@ -93186,16 +93398,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partition_with } - public static class alter_partition_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partition_with_environment_context_result"); + public static class alter_partitions_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partitions_with_environment_context_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new alter_partition_with_environment_context_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new alter_partition_with_environment_context_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_partitions_with_environment_context_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_partitions_with_environment_context_resultTupleSchemeFactory()); } private InvalidOperationException o1; // required @@ -93271,13 +93483,13 @@ public String getFieldName() { tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partition_with_environment_context_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partitions_with_environment_context_result.class, metaDataMap); } - public alter_partition_with_environment_context_result() { + public alter_partitions_with_environment_context_result() { } - public alter_partition_with_environment_context_result( + public alter_partitions_with_environment_context_result( InvalidOperationException o1, MetaException o2) { @@ -93289,7 +93501,7 @@ public alter_partition_with_environment_context_result( /** * Performs a deep copy on other. 
*/ - public alter_partition_with_environment_context_result(alter_partition_with_environment_context_result other) { + public alter_partitions_with_environment_context_result(alter_partitions_with_environment_context_result other) { if (other.isSetO1()) { this.o1 = new InvalidOperationException(other.o1); } @@ -93298,8 +93510,8 @@ public alter_partition_with_environment_context_result(alter_partition_with_envi } } - public alter_partition_with_environment_context_result deepCopy() { - return new alter_partition_with_environment_context_result(this); + public alter_partitions_with_environment_context_result deepCopy() { + return new alter_partitions_with_environment_context_result(this); } @Override @@ -93406,12 +93618,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_partition_with_environment_context_result) - return this.equals((alter_partition_with_environment_context_result)that); + if (that instanceof alter_partitions_with_environment_context_result) + return this.equals((alter_partitions_with_environment_context_result)that); return false; } - public boolean equals(alter_partition_with_environment_context_result that) { + public boolean equals(alter_partitions_with_environment_context_result that) { if (that == null) return false; @@ -93454,7 +93666,7 @@ public int hashCode() { } @Override - public int compareTo(alter_partition_with_environment_context_result other) { + public int compareTo(alter_partitions_with_environment_context_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -93498,7 +93710,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("alter_partition_with_environment_context_result("); + StringBuilder sb = new StringBuilder("alter_partitions_with_environment_context_result("); boolean first = true; sb.append("o1:"); @@ -93541,15 +93753,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_partition_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { - public alter_partition_with_environment_context_resultStandardScheme getScheme() { - return new alter_partition_with_environment_context_resultStandardScheme(); + private static class alter_partitions_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_resultStandardScheme getScheme() { + return new alter_partitions_with_environment_context_resultStandardScheme(); } } - private static class alter_partition_with_environment_context_resultStandardScheme extends StandardScheme { + private static class alter_partitions_with_environment_context_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -93586,7 +93798,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_wit struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -93606,16 +93818,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_wi } - private static class alter_partition_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { - public alter_partition_with_environment_context_resultTupleScheme getScheme() { - return new alter_partition_with_environment_context_resultTupleScheme(); + private static class alter_partitions_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { + public alter_partitions_with_environment_context_resultTupleScheme getScheme() { + return new alter_partitions_with_environment_context_resultTupleScheme(); } } - private static class alter_partition_with_environment_context_resultTupleScheme extends TupleScheme { + private static class alter_partitions_with_environment_context_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -93634,7 +93846,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_wit } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, 
alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { @@ -93652,31 +93864,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partition_with } - public static class rename_partition_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("rename_partition_args"); + public static class alter_partition_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partition_with_environment_context_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); - private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)4); + private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new rename_partition_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new rename_partition_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new alter_partition_with_environment_context_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_partition_with_environment_context_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required - private List part_vals; // required private Partition new_part; // required + private EnvironmentContext environment_context; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - PART_VALS((short)3, "part_vals"), - NEW_PART((short)4, "new_part"); + NEW_PART((short)3, "new_part"), + ENVIRONMENT_CONTEXT((short)4, "environment_context"); private static final Map byName = new HashMap(); @@ -93695,10 +93907,10 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // PART_VALS - return PART_VALS; - case 4: // NEW_PART + case 3: // NEW_PART return NEW_PART; + case 4: // ENVIRONMENT_CONTEXT + return ENVIRONMENT_CONTEXT; default: return null; } @@ -93746,60 +93958,58 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("new_part", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); + tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(rename_partition_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partition_with_environment_context_args.class, metaDataMap); } - public rename_partition_args() { + public alter_partition_with_environment_context_args() { } - public rename_partition_args( + public alter_partition_with_environment_context_args( String db_name, String tbl_name, - List part_vals, - Partition new_part) + Partition new_part, + EnvironmentContext environment_context) { this(); this.db_name = db_name; this.tbl_name = tbl_name; - this.part_vals = part_vals; this.new_part = new_part; + this.environment_context = environment_context; } /** * Performs a deep copy on other. 
*/ - public rename_partition_args(rename_partition_args other) { + public alter_partition_with_environment_context_args(alter_partition_with_environment_context_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } - if (other.isSetPart_vals()) { - List __this__part_vals = new ArrayList(other.part_vals); - this.part_vals = __this__part_vals; - } if (other.isSetNew_part()) { this.new_part = new Partition(other.new_part); } + if (other.isSetEnvironment_context()) { + this.environment_context = new EnvironmentContext(other.environment_context); + } } - public rename_partition_args deepCopy() { - return new rename_partition_args(this); + public alter_partition_with_environment_context_args deepCopy() { + return new alter_partition_with_environment_context_args(this); } @Override public void clear() { this.db_name = null; this.tbl_name = null; - this.part_vals = null; this.new_part = null; + this.environment_context = null; } public String getDb_name() { @@ -93848,64 +94058,49 @@ public void setTbl_nameIsSet(boolean value) { } } - public int getPart_valsSize() { - return (this.part_vals == null) ? 0 : this.part_vals.size(); - } - - public java.util.Iterator getPart_valsIterator() { - return (this.part_vals == null) ? null : this.part_vals.iterator(); - } - - public void addToPart_vals(String elem) { - if (this.part_vals == null) { - this.part_vals = new ArrayList(); - } - this.part_vals.add(elem); - } - - public List getPart_vals() { - return this.part_vals; + public Partition getNew_part() { + return this.new_part; } - public void setPart_vals(List part_vals) { - this.part_vals = part_vals; + public void setNew_part(Partition new_part) { + this.new_part = new_part; } - public void unsetPart_vals() { - this.part_vals = null; + public void unsetNew_part() { + this.new_part = null; } - /** Returns true if field part_vals is set (has been assigned a value) and false otherwise */ - public boolean isSetPart_vals() { - return this.part_vals != null; + /** Returns true if field new_part is set (has been assigned a value) and false otherwise */ + public boolean isSetNew_part() { + return this.new_part != null; } - public void setPart_valsIsSet(boolean value) { + public void setNew_partIsSet(boolean value) { if (!value) { - this.part_vals = null; + this.new_part = null; } } - public Partition getNew_part() { - return this.new_part; + public EnvironmentContext getEnvironment_context() { + return this.environment_context; } - public void setNew_part(Partition new_part) { - this.new_part = new_part; + public void setEnvironment_context(EnvironmentContext environment_context) { + this.environment_context = environment_context; } - public void unsetNew_part() { - this.new_part = null; + public void unsetEnvironment_context() { + this.environment_context = null; } - /** Returns true if field new_part is set (has been assigned a value) and false otherwise */ - public boolean isSetNew_part() { - return this.new_part != null; + /** Returns true if field environment_context is set (has been assigned a value) and false otherwise */ + public boolean isSetEnvironment_context() { + return this.environment_context != null; } - public void setNew_partIsSet(boolean value) { + public void setEnvironment_contextIsSet(boolean value) { if (!value) { - this.new_part = null; + this.environment_context = null; } } @@ -93927,19 +94122,19 @@ public void setFieldValue(_Fields field, Object value) { } break; - case PART_VALS: + case NEW_PART: if 
(value == null) { - unsetPart_vals(); + unsetNew_part(); } else { - setPart_vals((List)value); + setNew_part((Partition)value); } break; - case NEW_PART: + case ENVIRONMENT_CONTEXT: if (value == null) { - unsetNew_part(); + unsetEnvironment_context(); } else { - setNew_part((Partition)value); + setEnvironment_context((EnvironmentContext)value); } break; @@ -93954,12 +94149,12 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); - case PART_VALS: - return getPart_vals(); - case NEW_PART: return getNew_part(); + case ENVIRONMENT_CONTEXT: + return getEnvironment_context(); + } throw new IllegalStateException(); } @@ -93975,10 +94170,10 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); - case PART_VALS: - return isSetPart_vals(); case NEW_PART: return isSetNew_part(); + case ENVIRONMENT_CONTEXT: + return isSetEnvironment_context(); } throw new IllegalStateException(); } @@ -93987,12 +94182,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof rename_partition_args) - return this.equals((rename_partition_args)that); + if (that instanceof alter_partition_with_environment_context_args) + return this.equals((alter_partition_with_environment_context_args)that); return false; } - public boolean equals(rename_partition_args that) { + public boolean equals(alter_partition_with_environment_context_args that) { if (that == null) return false; @@ -94014,15 +94209,6 @@ public boolean equals(rename_partition_args that) { return false; } - boolean this_present_part_vals = true && this.isSetPart_vals(); - boolean that_present_part_vals = true && that.isSetPart_vals(); - if (this_present_part_vals || that_present_part_vals) { - if (!(this_present_part_vals && that_present_part_vals)) - return false; - if (!this.part_vals.equals(that.part_vals)) - return false; - } - boolean this_present_new_part = true && this.isSetNew_part(); boolean that_present_new_part = true && that.isSetNew_part(); if (this_present_new_part || that_present_new_part) { @@ -94032,6 +94218,15 @@ public boolean equals(rename_partition_args that) { return false; } + boolean this_present_environment_context = true && this.isSetEnvironment_context(); + boolean that_present_environment_context = true && that.isSetEnvironment_context(); + if (this_present_environment_context || that_present_environment_context) { + if (!(this_present_environment_context && that_present_environment_context)) + return false; + if (!this.environment_context.equals(that.environment_context)) + return false; + } + return true; } @@ -94049,21 +94244,21 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); - boolean present_part_vals = true && (isSetPart_vals()); - list.add(present_part_vals); - if (present_part_vals) - list.add(part_vals); - boolean present_new_part = true && (isSetNew_part()); list.add(present_new_part); if (present_new_part) list.add(new_part); + boolean present_environment_context = true && (isSetEnvironment_context()); + list.add(present_environment_context); + if (present_environment_context) + list.add(environment_context); + return list.hashCode(); } @Override - public int compareTo(rename_partition_args other) { + public int compareTo(alter_partition_with_environment_context_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -94090,22 +94285,22 @@ public int 
compareTo(rename_partition_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(other.isSetPart_vals()); + lastComparison = Boolean.valueOf(isSetNew_part()).compareTo(other.isSetNew_part()); if (lastComparison != 0) { return lastComparison; } - if (isSetPart_vals()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.part_vals, other.part_vals); + if (isSetNew_part()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_part, other.new_part); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetNew_part()).compareTo(other.isSetNew_part()); + lastComparison = Boolean.valueOf(isSetEnvironment_context()).compareTo(other.isSetEnvironment_context()); if (lastComparison != 0) { return lastComparison; } - if (isSetNew_part()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_part, other.new_part); + if (isSetEnvironment_context()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environment_context, other.environment_context); if (lastComparison != 0) { return lastComparison; } @@ -94127,7 +94322,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("rename_partition_args("); + StringBuilder sb = new StringBuilder("alter_partition_with_environment_context_args("); boolean first = true; sb.append("db_name:"); @@ -94146,19 +94341,19 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("part_vals:"); - if (this.part_vals == null) { + sb.append("new_part:"); + if (this.new_part == null) { sb.append("null"); } else { - sb.append(this.part_vals); + sb.append(this.new_part); } first = false; if (!first) sb.append(", "); - sb.append("new_part:"); - if (this.new_part == null) { + sb.append("environment_context:"); + if (this.environment_context == null) { sb.append("null"); } else { - sb.append(this.new_part); + sb.append(this.environment_context); } first = false; sb.append(")"); @@ -94171,6 +94366,9 @@ public void validate() throws org.apache.thrift.TException { if (new_part != null) { new_part.validate(); } + if (environment_context != null) { + environment_context.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -94189,15 +94387,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class rename_partition_argsStandardSchemeFactory implements SchemeFactory { - public rename_partition_argsStandardScheme getScheme() { - return new rename_partition_argsStandardScheme(); + private static class alter_partition_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { + public alter_partition_with_environment_context_argsStandardScheme getScheme() { + return new alter_partition_with_environment_context_argsStandardScheme(); } } - private static class rename_partition_argsStandardScheme extends StandardScheme { + private static class alter_partition_with_environment_context_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ 
-94223,29 +94421,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // PART_VALS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list942 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list942.size); - String _elem943; - for (int _i944 = 0; _i944 < _list942.size; ++_i944) - { - _elem943 = iprot.readString(); - struct.part_vals.add(_elem943); - } - iprot.readListEnd(); - } - struct.setPart_valsIsSet(true); + case 3: // NEW_PART + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.new_part = new Partition(); + struct.new_part.read(iprot); + struct.setNew_partIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // NEW_PART + case 4: // ENVIRONMENT_CONTEXT if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.new_part = new Partition(); - struct.new_part.read(iprot); - struct.setNew_partIsSet(true); + struct.environment_context = new EnvironmentContext(); + struct.environment_context.read(iprot); + struct.setEnvironment_contextIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -94259,7 +94448,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -94273,39 +94462,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } - if (struct.part_vals != null) { - oprot.writeFieldBegin(PART_VALS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter945 : struct.part_vals) - { - oprot.writeString(_iter945); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } if (struct.new_part != null) { oprot.writeFieldBegin(NEW_PART_FIELD_DESC); struct.new_part.write(oprot); oprot.writeFieldEnd(); } + if (struct.environment_context != null) { + oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); + struct.environment_context.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class rename_partition_argsTupleSchemeFactory implements SchemeFactory { - public rename_partition_argsTupleScheme getScheme() { - return new rename_partition_argsTupleScheme(); + private static class alter_partition_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { + public alter_partition_with_environment_context_argsTupleScheme getScheme() { + return new alter_partition_with_environment_context_argsTupleScheme(); } } - private static class rename_partition_argsTupleScheme extends TupleScheme { + private static class alter_partition_with_environment_context_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, 
alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -94314,10 +94496,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetTbl_name()) { optionals.set(1); } - if (struct.isSetPart_vals()) { + if (struct.isSetNew_part()) { optionals.set(2); } - if (struct.isSetNew_part()) { + if (struct.isSetEnvironment_context()) { optionals.set(3); } oprot.writeBitSet(optionals, 4); @@ -94327,22 +94509,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } - if (struct.isSetPart_vals()) { - { - oprot.writeI32(struct.part_vals.size()); - for (String _iter946 : struct.part_vals) - { - oprot.writeString(_iter946); - } - } - } if (struct.isSetNew_part()) { struct.new_part.write(oprot); } + if (struct.isSetEnvironment_context()) { + struct.environment_context.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partition_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { @@ -94354,38 +94530,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg struct.setTbl_nameIsSet(true); } if (incoming.get(2)) { - { - org.apache.thrift.protocol.TList _list947 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list947.size); - String _elem948; - for (int _i949 = 0; _i949 < _list947.size; ++_i949) - { - _elem948 = iprot.readString(); - struct.part_vals.add(_elem948); - } - } - struct.setPart_valsIsSet(true); - } - if (incoming.get(3)) { struct.new_part = new Partition(); struct.new_part.read(iprot); struct.setNew_partIsSet(true); } + if (incoming.get(3)) { + struct.environment_context = new EnvironmentContext(); + struct.environment_context.read(iprot); + struct.setEnvironment_contextIsSet(true); + } } } } - public static class rename_partition_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("rename_partition_result"); + public static class alter_partition_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("alter_partition_with_environment_context_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new rename_partition_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new rename_partition_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new 
alter_partition_with_environment_context_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new alter_partition_with_environment_context_resultTupleSchemeFactory()); } private InvalidOperationException o1; // required @@ -94461,13 +94629,13 @@ public String getFieldName() { tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(rename_partition_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_partition_with_environment_context_result.class, metaDataMap); } - public rename_partition_result() { + public alter_partition_with_environment_context_result() { } - public rename_partition_result( + public alter_partition_with_environment_context_result( InvalidOperationException o1, MetaException o2) { @@ -94479,7 +94647,7 @@ public rename_partition_result( /** * Performs a deep copy on other. */ - public rename_partition_result(rename_partition_result other) { + public alter_partition_with_environment_context_result(alter_partition_with_environment_context_result other) { if (other.isSetO1()) { this.o1 = new InvalidOperationException(other.o1); } @@ -94488,8 +94656,8 @@ public rename_partition_result(rename_partition_result other) { } } - public rename_partition_result deepCopy() { - return new rename_partition_result(this); + public alter_partition_with_environment_context_result deepCopy() { + return new alter_partition_with_environment_context_result(this); } @Override @@ -94596,12 +94764,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof rename_partition_result) - return this.equals((rename_partition_result)that); + if (that instanceof alter_partition_with_environment_context_result) + return this.equals((alter_partition_with_environment_context_result)that); return false; } - public boolean equals(rename_partition_result that) { + public boolean equals(alter_partition_with_environment_context_result that) { if (that == null) return false; @@ -94644,7 +94812,7 @@ public int hashCode() { } @Override - public int compareTo(rename_partition_result other) { + public int compareTo(alter_partition_with_environment_context_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -94688,7 +94856,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("rename_partition_result("); + StringBuilder sb = new StringBuilder("alter_partition_with_environment_context_result("); boolean first = true; sb.append("o1:"); @@ -94731,15 +94899,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class rename_partition_resultStandardSchemeFactory implements SchemeFactory { - public rename_partition_resultStandardScheme getScheme() { - return new rename_partition_resultStandardScheme(); + private static class alter_partition_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { + public alter_partition_with_environment_context_resultStandardScheme getScheme() { + return new alter_partition_with_environment_context_resultStandardScheme(); } } - private static class rename_partition_resultStandardScheme extends StandardScheme { + private static class alter_partition_with_environment_context_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -94776,7 +94944,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_re struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -94796,16 +94964,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_r } - private static class rename_partition_resultTupleSchemeFactory implements SchemeFactory { - public rename_partition_resultTupleScheme getScheme() { - return new rename_partition_resultTupleScheme(); + private static class alter_partition_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { + public alter_partition_with_environment_context_resultTupleScheme getScheme() { + return new alter_partition_with_environment_context_resultTupleScheme(); } } - private static class rename_partition_resultTupleScheme extends TupleScheme { + private static class alter_partition_with_environment_context_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -94824,7 +94992,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_re } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, alter_partition_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); 
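// The two bits read above flag which optional result fields were serialized
// (bit 0 = o1/InvalidOperationException, bit 1 = o2/MetaException); only the
// flagged fields are deserialized below.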
if (incoming.get(0)) { @@ -94842,25 +95010,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_res } - public static class partition_name_has_valid_characters_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("partition_name_has_valid_characters_args"); + public static class rename_partition_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("rename_partition_args"); - private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField THROW_EXCEPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("throw_exception", org.apache.thrift.protocol.TType.BOOL, (short)2); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("new_part", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new partition_name_has_valid_characters_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new partition_name_has_valid_characters_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new rename_partition_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new rename_partition_argsTupleSchemeFactory()); } + private String db_name; // required + private String tbl_name; // required private List part_vals; // required - private boolean throw_exception; // required + private Partition new_part; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PART_VALS((short)1, "part_vals"), - THROW_EXCEPTION((short)2, "throw_exception"); + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + PART_VALS((short)3, "part_vals"), + NEW_PART((short)4, "new_part"); private static final Map byName = new HashMap(); @@ -94875,10 +95049,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_res */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // PART_VALS + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // PART_VALS return PART_VALS; - case 2: // THROW_EXCEPTION - return THROW_EXCEPTION; + case 4: // NEW_PART + return NEW_PART; default: return null; } @@ -94919,54 +95097,1234 @@ public String getFieldName() { } // isset id assignments - private static final int __THROW_EXCEPTION_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.THROW_EXCEPTION, new org.apache.thrift.meta_data.FieldMetaData("throw_exception", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("new_part", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(partition_name_has_valid_characters_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(rename_partition_args.class, metaDataMap); } - public partition_name_has_valid_characters_args() { + public rename_partition_args() { } - public partition_name_has_valid_characters_args( + public rename_partition_args( + String db_name, + String tbl_name, List part_vals, - boolean throw_exception) + Partition new_part) { this(); + this.db_name = db_name; + this.tbl_name = tbl_name; this.part_vals = part_vals; - this.throw_exception = throw_exception; - setThrow_exceptionIsSet(true); + this.new_part = new_part; } /** * Performs a deep copy on other. 
*/ - public partition_name_has_valid_characters_args(partition_name_has_valid_characters_args other) { - __isset_bitfield = other.__isset_bitfield; + public rename_partition_args(rename_partition_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } if (other.isSetPart_vals()) { List __this__part_vals = new ArrayList(other.part_vals); this.part_vals = __this__part_vals; } - this.throw_exception = other.throw_exception; + if (other.isSetNew_part()) { + this.new_part = new Partition(other.new_part); + } } - public partition_name_has_valid_characters_args deepCopy() { - return new partition_name_has_valid_characters_args(this); + public rename_partition_args deepCopy() { + return new rename_partition_args(this); } @Override public void clear() { + this.db_name = null; + this.tbl_name = null; this.part_vals = null; - setThrow_exceptionIsSet(false); - this.throw_exception = false; + this.new_part = null; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public int getPart_valsSize() { + return (this.part_vals == null) ? 0 : this.part_vals.size(); + } + + public java.util.Iterator getPart_valsIterator() { + return (this.part_vals == null) ? 
null : this.part_vals.iterator(); + } + + public void addToPart_vals(String elem) { + if (this.part_vals == null) { + this.part_vals = new ArrayList(); + } + this.part_vals.add(elem); + } + + public List getPart_vals() { + return this.part_vals; + } + + public void setPart_vals(List part_vals) { + this.part_vals = part_vals; + } + + public void unsetPart_vals() { + this.part_vals = null; + } + + /** Returns true if field part_vals is set (has been assigned a value) and false otherwise */ + public boolean isSetPart_vals() { + return this.part_vals != null; + } + + public void setPart_valsIsSet(boolean value) { + if (!value) { + this.part_vals = null; + } + } + + public Partition getNew_part() { + return this.new_part; + } + + public void setNew_part(Partition new_part) { + this.new_part = new_part; + } + + public void unsetNew_part() { + this.new_part = null; + } + + /** Returns true if field new_part is set (has been assigned a value) and false otherwise */ + public boolean isSetNew_part() { + return this.new_part != null; + } + + public void setNew_partIsSet(boolean value) { + if (!value) { + this.new_part = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case PART_VALS: + if (value == null) { + unsetPart_vals(); + } else { + setPart_vals((List)value); + } + break; + + case NEW_PART: + if (value == null) { + unsetNew_part(); + } else { + setNew_part((Partition)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case PART_VALS: + return getPart_vals(); + + case NEW_PART: + return getNew_part(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case PART_VALS: + return isSetPart_vals(); + case NEW_PART: + return isSetNew_part(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof rename_partition_args) + return this.equals((rename_partition_args)that); + return false; + } + + public boolean equals(rename_partition_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_part_vals = true && this.isSetPart_vals(); + boolean that_present_part_vals = true && that.isSetPart_vals(); + if (this_present_part_vals || 
that_present_part_vals) { + if (!(this_present_part_vals && that_present_part_vals)) + return false; + if (!this.part_vals.equals(that.part_vals)) + return false; + } + + boolean this_present_new_part = true && this.isSetNew_part(); + boolean that_present_new_part = true && that.isSetNew_part(); + if (this_present_new_part || that_present_new_part) { + if (!(this_present_new_part && that_present_new_part)) + return false; + if (!this.new_part.equals(that.new_part)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_part_vals = true && (isSetPart_vals()); + list.add(present_part_vals); + if (present_part_vals) + list.add(part_vals); + + boolean present_new_part = true && (isSetNew_part()); + list.add(present_new_part); + if (present_new_part) + list.add(new_part); + + return list.hashCode(); + } + + @Override + public int compareTo(rename_partition_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPart_vals()).compareTo(other.isSetPart_vals()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPart_vals()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.part_vals, other.part_vals); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetNew_part()).compareTo(other.isSetNew_part()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNew_part()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.new_part, other.new_part); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("rename_partition_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if 
(!first) sb.append(", "); + sb.append("part_vals:"); + if (this.part_vals == null) { + sb.append("null"); + } else { + sb.append(this.part_vals); + } + first = false; + if (!first) sb.append(", "); + sb.append("new_part:"); + if (this.new_part == null) { + sb.append("null"); + } else { + sb.append(this.new_part); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (new_part != null) { + new_part.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class rename_partition_argsStandardSchemeFactory implements SchemeFactory { + public rename_partition_argsStandardScheme getScheme() { + return new rename_partition_argsStandardScheme(); + } + } + + private static class rename_partition_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PART_VALS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list950.size); + String _elem951; + for (int _i952 = 0; _i952 < _list950.size; ++_i952) + { + _elem951 = iprot.readString(); + struct.part_vals.add(_elem951); + } + iprot.readListEnd(); + } + struct.setPart_valsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // NEW_PART + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.new_part = new Partition(); + struct.new_part.read(iprot); + struct.setNew_partIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_args struct) throws org.apache.thrift.TException { + struct.validate(); + + 
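+        // Standard (binary) scheme: set fields are emitted in field-id order
+        // (db_name, tbl_name, part_vals, new_part) with begin/end markers;
+        // null fields are skipped rather than written as empty values.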
oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.part_vals != null) { + oprot.writeFieldBegin(PART_VALS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); + for (String _iter953 : struct.part_vals) + { + oprot.writeString(_iter953); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.new_part != null) { + oprot.writeFieldBegin(NEW_PART_FIELD_DESC); + struct.new_part.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class rename_partition_argsTupleSchemeFactory implements SchemeFactory { + public rename_partition_argsTupleScheme getScheme() { + return new rename_partition_argsTupleScheme(); + } + } + + private static class rename_partition_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDb_name()) { + optionals.set(0); + } + if (struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetPart_vals()) { + optionals.set(2); + } + if (struct.isSetNew_part()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetPart_vals()) { + { + oprot.writeI32(struct.part_vals.size()); + for (String _iter954 : struct.part_vals) + { + oprot.writeString(_iter954); + } + } + } + if (struct.isSetNew_part()) { + struct.new_part.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list955.size); + String _elem956; + for (int _i957 = 0; _i957 < _list955.size; ++_i957) + { + _elem956 = iprot.readString(); + struct.part_vals.add(_elem956); + } + } + struct.setPart_valsIsSet(true); + } + if (incoming.get(3)) { + struct.new_part = new Partition(); + struct.new_part.read(iprot); + struct.setNew_partIsSet(true); + } + } + } + + } + + public static class rename_partition_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("rename_partition_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new 
org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new rename_partition_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new rename_partition_resultTupleSchemeFactory()); + } + + private InvalidOperationException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(rename_partition_result.class, metaDataMap); + } + + public rename_partition_result() { + } + + public rename_partition_result( + InvalidOperationException o1, + MetaException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public rename_partition_result(rename_partition_result other) { + if (other.isSetO1()) { + this.o1 = new InvalidOperationException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public rename_partition_result deepCopy() { + return new rename_partition_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public InvalidOperationException getO1() { + return this.o1; + } + + public void setO1(InvalidOperationException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((InvalidOperationException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof rename_partition_result) + return this.equals((rename_partition_result)that); + return false; + } + + public boolean equals(rename_partition_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(rename_partition_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + 
return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("rename_partition_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class rename_partition_resultStandardSchemeFactory implements SchemeFactory { + public rename_partition_resultStandardScheme getScheme() { + return new rename_partition_resultStandardScheme(); + } + } + + private static class rename_partition_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + 
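+      // write() mirrors read(): each exception field is emitted as a nested
+      // struct only when set, terminated by a field-stop marker.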
public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class rename_partition_resultTupleSchemeFactory implements SchemeFactory { + public rename_partition_resultTupleScheme getScheme() { + return new rename_partition_resultTupleScheme(); + } + } + + private static class rename_partition_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class partition_name_has_valid_characters_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("partition_name_has_valid_characters_args"); + + private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("part_vals", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField THROW_EXCEPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("throw_exception", org.apache.thrift.protocol.TType.BOOL, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new partition_name_has_valid_characters_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new partition_name_has_valid_characters_argsTupleSchemeFactory()); + } + + private List part_vals; // required + private boolean throw_exception; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PART_VALS((short)1, "part_vals"), + THROW_EXCEPTION((short)2, "throw_exception"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PART_VALS + return PART_VALS; + case 2: // THROW_EXCEPTION + return THROW_EXCEPTION; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __THROW_EXCEPTION_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PART_VALS, new org.apache.thrift.meta_data.FieldMetaData("part_vals", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.THROW_EXCEPTION, new org.apache.thrift.meta_data.FieldMetaData("throw_exception", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(partition_name_has_valid_characters_args.class, metaDataMap); + } + + public partition_name_has_valid_characters_args() { + } + + public partition_name_has_valid_characters_args( + List part_vals, + boolean throw_exception) + { + this(); + this.part_vals = part_vals; + this.throw_exception = throw_exception; + setThrow_exceptionIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public partition_name_has_valid_characters_args(partition_name_has_valid_characters_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetPart_vals()) { + List __this__part_vals = new ArrayList(other.part_vals); + this.part_vals = __this__part_vals; + } + this.throw_exception = other.throw_exception; + } + + public partition_name_has_valid_characters_args deepCopy() { + return new partition_name_has_valid_characters_args(this); + } + + @Override + public void clear() { + this.part_vals = null; + setThrow_exceptionIsSet(false); + this.throw_exception = false; } public int getPart_valsSize() { @@ -95235,13 +96593,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list950.size); - String _elem951; - for (int _i952 = 0; _i952 < _list950.size; ++_i952) + org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list958.size); + String _elem959; + for (int _i960 = 0; _i960 < _list958.size; ++_i960) { - _elem951 = iprot.readString(); - struct.part_vals.add(_elem951); + _elem959 = iprot.readString(); + struct.part_vals.add(_elem959); } iprot.readListEnd(); } @@ -95275,9 +96633,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter953 : struct.part_vals) + for (String _iter961 : struct.part_vals) { - oprot.writeString(_iter953); + oprot.writeString(_iter961); } oprot.writeListEnd(); } @@ -95314,9 +96672,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter954 : struct.part_vals) + for (String _iter962 : struct.part_vals) { - oprot.writeString(_iter954); + oprot.writeString(_iter962); } } } @@ -95331,13 +96689,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list955.size); - String _elem956; - for (int _i957 = 0; _i957 < _list955.size; ++_i957) + org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list963.size); + String _elem964; + for (int _i965 = 0; _i965 < _list963.size; ++_i965) { - _elem956 = iprot.readString(); - struct.part_vals.add(_elem956); + _elem964 = iprot.readString(); + struct.part_vals.add(_elem964); } } struct.setPart_valsIsSet(true); @@ -97492,13 +98850,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); - struct.success = new ArrayList(_list958.size); - String _elem959; - for (int _i960 = 0; _i960 < _list958.size; ++_i960) + org.apache.thrift.protocol.TList _list966 = iprot.readListBegin(); + struct.success = new ArrayList(_list966.size); + String _elem967; 
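// Note on the renumbering hunks that follow: every generated temporary
// (_listNNN, _elemNNN, _iNNN, _mapNNN, _keyNNN, _valNNN) shifts by a fixed
// offset of 8 (e.g. _list950 -> _list958). This is a mechanical artifact of
// re-running the Thrift compiler after alter_partitions_with_environment_context
// was added to the service; the serialization logic itself is unchanged.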
+ for (int _i968 = 0; _i968 < _list966.size; ++_i968) { - _elem959 = iprot.readString(); - struct.success.add(_elem959); + _elem967 = iprot.readString(); + struct.success.add(_elem967); } iprot.readListEnd(); } @@ -97533,9 +98891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter961 : struct.success) + for (String _iter969 : struct.success) { - oprot.writeString(_iter961); + oprot.writeString(_iter969); } oprot.writeListEnd(); } @@ -97574,9 +98932,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter962 : struct.success) + for (String _iter970 : struct.success) { - oprot.writeString(_iter962); + oprot.writeString(_iter970); } } } @@ -97591,13 +98949,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list963.size); - String _elem964; - for (int _i965 = 0; _i965 < _list963.size; ++_i965) + org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list971.size); + String _elem972; + for (int _i973 = 0; _i973 < _list971.size; ++_i973) { - _elem964 = iprot.readString(); - struct.success.add(_elem964); + _elem972 = iprot.readString(); + struct.success.add(_elem972); } } struct.setSuccessIsSet(true); @@ -98360,15 +99718,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map966 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map966.size); - String _key967; - String _val968; - for (int _i969 = 0; _i969 < _map966.size; ++_i969) + org.apache.thrift.protocol.TMap _map974 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map974.size); + String _key975; + String _val976; + for (int _i977 = 0; _i977 < _map974.size; ++_i977) { - _key967 = iprot.readString(); - _val968 = iprot.readString(); - struct.success.put(_key967, _val968); + _key975 = iprot.readString(); + _val976 = iprot.readString(); + struct.success.put(_key975, _val976); } iprot.readMapEnd(); } @@ -98403,10 +99761,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter970 : struct.success.entrySet()) + for (Map.Entry _iter978 : struct.success.entrySet()) { - oprot.writeString(_iter970.getKey()); - oprot.writeString(_iter970.getValue()); + oprot.writeString(_iter978.getKey()); + oprot.writeString(_iter978.getValue()); } oprot.writeMapEnd(); } @@ -98445,10 +99803,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter971 : struct.success.entrySet()) + for (Map.Entry _iter979 
: struct.success.entrySet()) { - oprot.writeString(_iter971.getKey()); - oprot.writeString(_iter971.getValue()); + oprot.writeString(_iter979.getKey()); + oprot.writeString(_iter979.getValue()); } } } @@ -98463,15 +99821,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map972 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map972.size); - String _key973; - String _val974; - for (int _i975 = 0; _i975 < _map972.size; ++_i975) + org.apache.thrift.protocol.TMap _map980 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map980.size); + String _key981; + String _val982; + for (int _i983 = 0; _i983 < _map980.size; ++_i983) { - _key973 = iprot.readString(); - _val974 = iprot.readString(); - struct.success.put(_key973, _val974); + _key981 = iprot.readString(); + _val982 = iprot.readString(); + struct.success.put(_key981, _val982); } } struct.setSuccessIsSet(true); @@ -99066,15 +100424,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map976 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map976.size); - String _key977; - String _val978; - for (int _i979 = 0; _i979 < _map976.size; ++_i979) + org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map984.size); + String _key985; + String _val986; + for (int _i987 = 0; _i987 < _map984.size; ++_i987) { - _key977 = iprot.readString(); - _val978 = iprot.readString(); - struct.part_vals.put(_key977, _val978); + _key985 = iprot.readString(); + _val986 = iprot.readString(); + struct.part_vals.put(_key985, _val986); } iprot.readMapEnd(); } @@ -99118,10 +100476,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter980 : struct.part_vals.entrySet()) + for (Map.Entry _iter988 : struct.part_vals.entrySet()) { - oprot.writeString(_iter980.getKey()); - oprot.writeString(_iter980.getValue()); + oprot.writeString(_iter988.getKey()); + oprot.writeString(_iter988.getValue()); } oprot.writeMapEnd(); } @@ -99172,10 +100530,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter981 : struct.part_vals.entrySet()) + for (Map.Entry _iter989 : struct.part_vals.entrySet()) { - oprot.writeString(_iter981.getKey()); - oprot.writeString(_iter981.getValue()); + oprot.writeString(_iter989.getKey()); + oprot.writeString(_iter989.getValue()); } } } @@ -99198,15 +100556,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map982 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map982.size); - 
String _key983; - String _val984; - for (int _i985 = 0; _i985 < _map982.size; ++_i985) + org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map990.size); + String _key991; + String _val992; + for (int _i993 = 0; _i993 < _map990.size; ++_i993) { - _key983 = iprot.readString(); - _val984 = iprot.readString(); - struct.part_vals.put(_key983, _val984); + _key991 = iprot.readString(); + _val992 = iprot.readString(); + struct.part_vals.put(_key991, _val992); } } struct.setPart_valsIsSet(true); @@ -100690,15 +102048,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map986 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map986.size); - String _key987; - String _val988; - for (int _i989 = 0; _i989 < _map986.size; ++_i989) + org.apache.thrift.protocol.TMap _map994 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map994.size); + String _key995; + String _val996; + for (int _i997 = 0; _i997 < _map994.size; ++_i997) { - _key987 = iprot.readString(); - _val988 = iprot.readString(); - struct.part_vals.put(_key987, _val988); + _key995 = iprot.readString(); + _val996 = iprot.readString(); + struct.part_vals.put(_key995, _val996); } iprot.readMapEnd(); } @@ -100742,10 +102100,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter990 : struct.part_vals.entrySet()) + for (Map.Entry _iter998 : struct.part_vals.entrySet()) { - oprot.writeString(_iter990.getKey()); - oprot.writeString(_iter990.getValue()); + oprot.writeString(_iter998.getKey()); + oprot.writeString(_iter998.getValue()); } oprot.writeMapEnd(); } @@ -100796,10 +102154,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter991 : struct.part_vals.entrySet()) + for (Map.Entry _iter999 : struct.part_vals.entrySet()) { - oprot.writeString(_iter991.getKey()); - oprot.writeString(_iter991.getValue()); + oprot.writeString(_iter999.getKey()); + oprot.writeString(_iter999.getValue()); } } } @@ -100822,15 +102180,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map992 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map992.size); - String _key993; - String _val994; - for (int _i995 = 0; _i995 < _map992.size; ++_i995) + org.apache.thrift.protocol.TMap _map1000 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1000.size); + String _key1001; + String _val1002; + for (int _i1003 = 0; _i1003 < _map1000.size; ++_i1003) { - _key993 = iprot.readString(); - _val994 = iprot.readString(); - struct.part_vals.put(_key993, _val994); + _key1001 = iprot.readString(); + _val1002 = iprot.readString(); + 
struct.part_vals.put(_key1001, _val1002); } } struct.setPart_valsIsSet(true); @@ -107554,14 +108912,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list996 = iprot.readListBegin(); - struct.success = new ArrayList(_list996.size); - Index _elem997; - for (int _i998 = 0; _i998 < _list996.size; ++_i998) + org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); + struct.success = new ArrayList(_list1004.size); + Index _elem1005; + for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) { - _elem997 = new Index(); - _elem997.read(iprot); - struct.success.add(_elem997); + _elem1005 = new Index(); + _elem1005.read(iprot); + struct.success.add(_elem1005); } iprot.readListEnd(); } @@ -107605,9 +108963,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter999 : struct.success) + for (Index _iter1007 : struct.success) { - _iter999.write(oprot); + _iter1007.write(oprot); } oprot.writeListEnd(); } @@ -107654,9 +109012,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter1000 : struct.success) + for (Index _iter1008 : struct.success) { - _iter1000.write(oprot); + _iter1008.write(oprot); } } } @@ -107674,14 +109032,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1001 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1001.size); - Index _elem1002; - for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003) + org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1009.size); + Index _elem1010; + for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) { - _elem1002 = new Index(); - _elem1002.read(iprot); - struct.success.add(_elem1002); + _elem1010 = new Index(); + _elem1010.read(iprot); + struct.success.add(_elem1010); } } struct.setSuccessIsSet(true); @@ -108660,13 +110018,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); - struct.success = new ArrayList(_list1004.size); - String _elem1005; - for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) + org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); + struct.success = new ArrayList(_list1012.size); + String _elem1013; + for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) { - _elem1005 = iprot.readString(); - struct.success.add(_elem1005); + _elem1013 = iprot.readString(); + struct.success.add(_elem1013); } iprot.readListEnd(); } @@ -108701,9 +110059,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); 
- for (String _iter1007 : struct.success) + for (String _iter1015 : struct.success) { - oprot.writeString(_iter1007); + oprot.writeString(_iter1015); } oprot.writeListEnd(); } @@ -108742,9 +110100,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1008 : struct.success) + for (String _iter1016 : struct.success) { - oprot.writeString(_iter1008); + oprot.writeString(_iter1016); } } } @@ -108759,13 +110117,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1009.size); - String _elem1010; - for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) + org.apache.thrift.protocol.TList _list1017 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1017.size); + String _elem1018; + for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019) { - _elem1010 = iprot.readString(); - struct.success.add(_elem1010); + _elem1018 = iprot.readString(); + struct.success.add(_elem1018); } } struct.setSuccessIsSet(true); @@ -124500,13 +125858,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); - struct.success = new ArrayList(_list1012.size); - String _elem1013; - for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) + org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); + struct.success = new ArrayList(_list1020.size); + String _elem1021; + for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) { - _elem1013 = iprot.readString(); - struct.success.add(_elem1013); + _elem1021 = iprot.readString(); + struct.success.add(_elem1021); } iprot.readListEnd(); } @@ -124541,9 +125899,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1015 : struct.success) + for (String _iter1023 : struct.success) { - oprot.writeString(_iter1015); + oprot.writeString(_iter1023); } oprot.writeListEnd(); } @@ -124582,9 +125940,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1016 : struct.success) + for (String _iter1024 : struct.success) { - oprot.writeString(_iter1016); + oprot.writeString(_iter1024); } } } @@ -124599,13 +125957,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1017 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1017.size); - String _elem1018; - for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019) + org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list1025.size); + String _elem1026; + for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) { - _elem1018 = iprot.readString(); - struct.success.add(_elem1018); + _elem1026 = iprot.readString(); + struct.success.add(_elem1026); } } struct.setSuccessIsSet(true); @@ -128660,13 +130018,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); - struct.success = new ArrayList(_list1020.size); - String _elem1021; - for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) + org.apache.thrift.protocol.TList _list1028 = iprot.readListBegin(); + struct.success = new ArrayList(_list1028.size); + String _elem1029; + for (int _i1030 = 0; _i1030 < _list1028.size; ++_i1030) { - _elem1021 = iprot.readString(); - struct.success.add(_elem1021); + _elem1029 = iprot.readString(); + struct.success.add(_elem1029); } iprot.readListEnd(); } @@ -128701,9 +130059,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1023 : struct.success) + for (String _iter1031 : struct.success) { - oprot.writeString(_iter1023); + oprot.writeString(_iter1031); } oprot.writeListEnd(); } @@ -128742,9 +130100,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1024 : struct.success) + for (String _iter1032 : struct.success) { - oprot.writeString(_iter1024); + oprot.writeString(_iter1032); } } } @@ -128759,13 +130117,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1025.size); - String _elem1026; - for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) + org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1033.size); + String _elem1034; + for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035) { - _elem1026 = iprot.readString(); - struct.success.add(_elem1026); + _elem1034 = iprot.readString(); + struct.success.add(_elem1034); } } struct.setSuccessIsSet(true); @@ -132056,14 +133414,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1028 = iprot.readListBegin(); - struct.success = new ArrayList(_list1028.size); - Role _elem1029; - for (int _i1030 = 0; _i1030 < _list1028.size; ++_i1030) + org.apache.thrift.protocol.TList _list1036 = iprot.readListBegin(); + struct.success = new ArrayList(_list1036.size); + Role _elem1037; + for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) { - _elem1029 = new Role(); - _elem1029.read(iprot); - struct.success.add(_elem1029); + _elem1037 = new Role(); + _elem1037.read(iprot); + struct.success.add(_elem1037); } iprot.readListEnd(); } @@ -132098,9 +133456,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1031 : struct.success) + for (Role _iter1039 : struct.success) { - _iter1031.write(oprot); + _iter1039.write(oprot); } oprot.writeListEnd(); } @@ -132139,9 +133497,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1032 : struct.success) + for (Role _iter1040 : struct.success) { - _iter1032.write(oprot); + _iter1040.write(oprot); } } } @@ -132156,14 +133514,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1033.size); - Role _elem1034; - for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035) + org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1041.size); + Role _elem1042; + for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) { - _elem1034 = new Role(); - _elem1034.read(iprot); - struct.success.add(_elem1034); + _elem1042 = new Role(); + _elem1042.read(iprot); + struct.success.add(_elem1042); } } struct.setSuccessIsSet(true); @@ -135168,13 +136526,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1036 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1036.size); - String _elem1037; - for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) + org.apache.thrift.protocol.TList _list1044 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1044.size); + String _elem1045; + for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046) { - _elem1037 = iprot.readString(); - struct.group_names.add(_elem1037); + _elem1045 = iprot.readString(); + struct.group_names.add(_elem1045); } iprot.readListEnd(); } @@ -135210,9 +136568,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1039 : struct.group_names) + for (String _iter1047 : struct.group_names) { - oprot.writeString(_iter1039); + oprot.writeString(_iter1047); } oprot.writeListEnd(); } @@ -135255,9 +136613,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1040 : struct.group_names) + for (String _iter1048 : struct.group_names) { - oprot.writeString(_iter1040); + oprot.writeString(_iter1048); } } } @@ -135278,13 +136636,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1041.size); - 
String _elem1042; - for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) + org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1049.size); + String _elem1050; + for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051) { - _elem1042 = iprot.readString(); - struct.group_names.add(_elem1042); + _elem1050 = iprot.readString(); + struct.group_names.add(_elem1050); } } struct.setGroup_namesIsSet(true); @@ -136742,14 +138100,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1044 = iprot.readListBegin(); - struct.success = new ArrayList(_list1044.size); - HiveObjectPrivilege _elem1045; - for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046) + org.apache.thrift.protocol.TList _list1052 = iprot.readListBegin(); + struct.success = new ArrayList(_list1052.size); + HiveObjectPrivilege _elem1053; + for (int _i1054 = 0; _i1054 < _list1052.size; ++_i1054) { - _elem1045 = new HiveObjectPrivilege(); - _elem1045.read(iprot); - struct.success.add(_elem1045); + _elem1053 = new HiveObjectPrivilege(); + _elem1053.read(iprot); + struct.success.add(_elem1053); } iprot.readListEnd(); } @@ -136784,9 +138142,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1047 : struct.success) + for (HiveObjectPrivilege _iter1055 : struct.success) { - _iter1047.write(oprot); + _iter1055.write(oprot); } oprot.writeListEnd(); } @@ -136825,9 +138183,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1048 : struct.success) + for (HiveObjectPrivilege _iter1056 : struct.success) { - _iter1048.write(oprot); + _iter1056.write(oprot); } } } @@ -136842,14 +138200,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1049.size); - HiveObjectPrivilege _elem1050; - for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051) + org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1057.size); + HiveObjectPrivilege _elem1058; + for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059) { - _elem1050 = new HiveObjectPrivilege(); - _elem1050.read(iprot); - struct.success.add(_elem1050); + _elem1058 = new HiveObjectPrivilege(); + _elem1058.read(iprot); + struct.success.add(_elem1058); } } struct.setSuccessIsSet(true); @@ -139751,13 +141109,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1052 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1052.size); - String _elem1053; - for (int _i1054 = 0; _i1054 < _list1052.size; 
++_i1054) + org.apache.thrift.protocol.TList _list1060 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1060.size); + String _elem1061; + for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062) { - _elem1053 = iprot.readString(); - struct.group_names.add(_elem1053); + _elem1061 = iprot.readString(); + struct.group_names.add(_elem1061); } iprot.readListEnd(); } @@ -139788,9 +141146,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1055 : struct.group_names) + for (String _iter1063 : struct.group_names) { - oprot.writeString(_iter1055); + oprot.writeString(_iter1063); } oprot.writeListEnd(); } @@ -139827,9 +141185,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1056 : struct.group_names) + for (String _iter1064 : struct.group_names) { - oprot.writeString(_iter1056); + oprot.writeString(_iter1064); } } } @@ -139845,13 +141203,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1057.size); - String _elem1058; - for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059) + org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1065.size); + String _elem1066; + for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) { - _elem1058 = iprot.readString(); - struct.group_names.add(_elem1058); + _elem1066 = iprot.readString(); + struct.group_names.add(_elem1066); } } struct.setGroup_namesIsSet(true); @@ -140254,13 +141612,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1060 = iprot.readListBegin(); - struct.success = new ArrayList(_list1060.size); - String _elem1061; - for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062) + org.apache.thrift.protocol.TList _list1068 = iprot.readListBegin(); + struct.success = new ArrayList(_list1068.size); + String _elem1069; + for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) { - _elem1061 = iprot.readString(); - struct.success.add(_elem1061); + _elem1069 = iprot.readString(); + struct.success.add(_elem1069); } iprot.readListEnd(); } @@ -140295,9 +141653,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1063 : struct.success) + for (String _iter1071 : struct.success) { - oprot.writeString(_iter1063); + oprot.writeString(_iter1071); } oprot.writeListEnd(); } @@ -140336,9 +141694,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1064 : struct.success) + for (String _iter1072 : struct.success) { - 
oprot.writeString(_iter1064); + oprot.writeString(_iter1072); } } } @@ -140353,13 +141711,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1065.size); - String _elem1066; - for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) + org.apache.thrift.protocol.TList _list1073 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1073.size); + String _elem1074; + for (int _i1075 = 0; _i1075 < _list1073.size; ++_i1075) { - _elem1066 = iprot.readString(); - struct.success.add(_elem1066); + _elem1074 = iprot.readString(); + struct.success.add(_elem1074); } } struct.setSuccessIsSet(true); diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index d0289fe..f996282 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -567,6 +567,15 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { /** * @param string $db_name * @param string $tbl_name + * @param \metastore\Partition[] $new_parts + * @param \metastore\EnvironmentContext $environment_context + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context); + /** + * @param string $db_name + * @param string $tbl_name * @param \metastore\Partition $new_part * @param \metastore\EnvironmentContext $environment_context * @throws \metastore\InvalidOperationException @@ -4724,6 +4733,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context) + { + $this->send_alter_partitions_with_environment_context($db_name, $tbl_name, $new_parts, $environment_context); + $this->recv_alter_partitions_with_environment_context(); + } + + public function send_alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context) + { + $args = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_args(); + $args->db_name = $db_name; + $args->tbl_name = $tbl_name; + $args->new_parts = $new_parts; + $args->environment_context = $environment_context; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'alter_partitions_with_environment_context', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('alter_partitions_with_environment_context', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_alter_partitions_with_environment_context() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + return; + } + public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context) { $this->send_alter_partition_with_environment_context($db_name, $tbl_name, $new_part, $environment_context); @@ -25029,6 +25095,285 @@ class ThriftHiveMetastore_alter_partitions_result { } +class ThriftHiveMetastore_alter_partitions_with_environment_context_args { + static $_TSPEC; + + /** + * @var string + */ + public $db_name = null; + /** + * @var string + */ + public $tbl_name = null; + /** + * @var \metastore\Partition[] + */ + public $new_parts = null; + /** + * @var \metastore\EnvironmentContext + */ + public $environment_context = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'new_parts', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\Partition', + ), + ), + 4 => array( + 'var' => 'environment_context', + 'type' => TType::STRUCT, + 'class' => '\metastore\EnvironmentContext', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['new_parts'])) { + $this->new_parts = $vals['new_parts']; + } + if (isset($vals['environment_context'])) { + $this->environment_context = $vals['environment_context']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_partitions_with_environment_context_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::LST) { + $this->new_parts = array(); + $_size832 = 0; + $_etype835 = 0; + $xfer += $input->readListBegin($_etype835, $_size832); + for ($_i836 = 0; $_i836 < $_size832; ++$_i836) + { + $elem837 = null; + $elem837 = new \metastore\Partition(); + $xfer += $elem837->read($input); + $this->new_parts []= $elem837; + } + $xfer += $input->readListEnd(); + } else { + $xfer += 
$input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->environment_context = new \metastore\EnvironmentContext(); + $xfer += $this->environment_context->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->new_parts !== null) { + if (!is_array($this->new_parts)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('new_parts', TType::LST, 3); + { + $output->writeListBegin(TType::STRUCT, count($this->new_parts)); + { + foreach ($this->new_parts as $iter838) + { + $xfer += $iter838->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->environment_context !== null) { + if (!is_object($this->environment_context)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('environment_context', TType::STRUCT, 4); + $xfer += $this->environment_context->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_alter_partitions_with_environment_context_result { + static $_TSPEC; + + /** + * @var \metastore\InvalidOperationException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_alter_partitions_with_environment_context_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\InvalidOperationException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function 
write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_alter_partition_with_environment_context_args { static $_TSPEC; @@ -25383,14 +25728,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size832 = 0; - $_etype835 = 0; - $xfer += $input->readListBegin($_etype835, $_size832); - for ($_i836 = 0; $_i836 < $_size832; ++$_i836) + $_size839 = 0; + $_etype842 = 0; + $xfer += $input->readListBegin($_etype842, $_size839); + for ($_i843 = 0; $_i843 < $_size839; ++$_i843) { - $elem837 = null; - $xfer += $input->readString($elem837); - $this->part_vals []= $elem837; + $elem844 = null; + $xfer += $input->readString($elem844); + $this->part_vals []= $elem844; } $xfer += $input->readListEnd(); } else { @@ -25436,9 +25781,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter838) + foreach ($this->part_vals as $iter845) { - $xfer += $output->writeString($iter838); + $xfer += $output->writeString($iter845); } } $output->writeListEnd(); @@ -25623,14 +25968,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size839 = 0; - $_etype842 = 0; - $xfer += $input->readListBegin($_etype842, $_size839); - for ($_i843 = 0; $_i843 < $_size839; ++$_i843) + $_size846 = 0; + $_etype849 = 0; + $xfer += $input->readListBegin($_etype849, $_size846); + for ($_i850 = 0; $_i850 < $_size846; ++$_i850) { - $elem844 = null; - $xfer += $input->readString($elem844); - $this->part_vals []= $elem844; + $elem851 = null; + $xfer += $input->readString($elem851); + $this->part_vals []= $elem851; } $xfer += $input->readListEnd(); } else { @@ -25665,9 +26010,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter845) + foreach ($this->part_vals as $iter852) { - $xfer += $output->writeString($iter845); + $xfer += $output->writeString($iter852); } } $output->writeListEnd(); @@ -26121,14 +26466,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size846 = 0; - $_etype849 = 0; - $xfer += $input->readListBegin($_etype849, $_size846); - for ($_i850 = 0; $_i850 < $_size846; ++$_i850) + $_size853 = 0; + $_etype856 = 0; + $xfer += $input->readListBegin($_etype856, $_size853); + for ($_i857 = 0; $_i857 < $_size853; ++$_i857) { - $elem851 = null; - $xfer += $input->readString($elem851); - $this->success []= $elem851; + $elem858 = null; + $xfer += $input->readString($elem858); + $this->success []= $elem858; } $xfer += $input->readListEnd(); } else { @@ -26164,9 +26509,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter852) + foreach 
($this->success as $iter859) { - $xfer += $output->writeString($iter852); + $xfer += $output->writeString($iter859); } } $output->writeListEnd(); @@ -26326,17 +26671,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size853 = 0; - $_ktype854 = 0; - $_vtype855 = 0; - $xfer += $input->readMapBegin($_ktype854, $_vtype855, $_size853); - for ($_i857 = 0; $_i857 < $_size853; ++$_i857) + $_size860 = 0; + $_ktype861 = 0; + $_vtype862 = 0; + $xfer += $input->readMapBegin($_ktype861, $_vtype862, $_size860); + for ($_i864 = 0; $_i864 < $_size860; ++$_i864) { - $key858 = ''; - $val859 = ''; - $xfer += $input->readString($key858); - $xfer += $input->readString($val859); - $this->success[$key858] = $val859; + $key865 = ''; + $val866 = ''; + $xfer += $input->readString($key865); + $xfer += $input->readString($val866); + $this->success[$key865] = $val866; } $xfer += $input->readMapEnd(); } else { @@ -26372,10 +26717,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter860 => $viter861) + foreach ($this->success as $kiter867 => $viter868) { - $xfer += $output->writeString($kiter860); - $xfer += $output->writeString($viter861); + $xfer += $output->writeString($kiter867); + $xfer += $output->writeString($viter868); } } $output->writeMapEnd(); @@ -26495,17 +26840,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size862 = 0; - $_ktype863 = 0; - $_vtype864 = 0; - $xfer += $input->readMapBegin($_ktype863, $_vtype864, $_size862); - for ($_i866 = 0; $_i866 < $_size862; ++$_i866) + $_size869 = 0; + $_ktype870 = 0; + $_vtype871 = 0; + $xfer += $input->readMapBegin($_ktype870, $_vtype871, $_size869); + for ($_i873 = 0; $_i873 < $_size869; ++$_i873) { - $key867 = ''; - $val868 = ''; - $xfer += $input->readString($key867); - $xfer += $input->readString($val868); - $this->part_vals[$key867] = $val868; + $key874 = ''; + $val875 = ''; + $xfer += $input->readString($key874); + $xfer += $input->readString($val875); + $this->part_vals[$key874] = $val875; } $xfer += $input->readMapEnd(); } else { @@ -26550,10 +26895,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter869 => $viter870) + foreach ($this->part_vals as $kiter876 => $viter877) { - $xfer += $output->writeString($kiter869); - $xfer += $output->writeString($viter870); + $xfer += $output->writeString($kiter876); + $xfer += $output->writeString($viter877); } } $output->writeMapEnd(); @@ -26875,17 +27220,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size871 = 0; - $_ktype872 = 0; - $_vtype873 = 0; - $xfer += $input->readMapBegin($_ktype872, $_vtype873, $_size871); - for ($_i875 = 0; $_i875 < $_size871; ++$_i875) + $_size878 = 0; + $_ktype879 = 0; + $_vtype880 = 0; + $xfer += $input->readMapBegin($_ktype879, $_vtype880, $_size878); + for ($_i882 = 0; $_i882 < $_size878; ++$_i882) { - $key876 = ''; - $val877 = ''; - $xfer += $input->readString($key876); - $xfer += $input->readString($val877); - $this->part_vals[$key876] = $val877; + $key883 = ''; + $val884 = ''; + $xfer += $input->readString($key883); + $xfer += $input->readString($val884); + $this->part_vals[$key883] = $val884; 
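// As on the Java side, the renumbering throughout this generated PHP client
// ($_sizeNNN, $elemNNN, $iterNNN, $keyNNN, $valNNN; here a fixed offset of 7,
// e.g. $_size832 -> $_size839) is purely a regeneration artifact of the new
// alter_partitions_with_environment_context method. Its o1/o2 result fields
// reach PHP callers as exceptions thrown by
// recv_alter_partitions_with_environment_context(), defined earlier in this file.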
} $xfer += $input->readMapEnd(); } else { @@ -26930,10 +27275,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter878 => $viter879) + foreach ($this->part_vals as $kiter885 => $viter886) { - $xfer += $output->writeString($kiter878); - $xfer += $output->writeString($viter879); + $xfer += $output->writeString($kiter885); + $xfer += $output->writeString($viter886); } } $output->writeMapEnd(); @@ -28407,15 +28752,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size880 = 0; - $_etype883 = 0; - $xfer += $input->readListBegin($_etype883, $_size880); - for ($_i884 = 0; $_i884 < $_size880; ++$_i884) + $_size887 = 0; + $_etype890 = 0; + $xfer += $input->readListBegin($_etype890, $_size887); + for ($_i891 = 0; $_i891 < $_size887; ++$_i891) { - $elem885 = null; - $elem885 = new \metastore\Index(); - $xfer += $elem885->read($input); - $this->success []= $elem885; + $elem892 = null; + $elem892 = new \metastore\Index(); + $xfer += $elem892->read($input); + $this->success []= $elem892; } $xfer += $input->readListEnd(); } else { @@ -28459,9 +28804,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter886) + foreach ($this->success as $iter893) { - $xfer += $iter886->write($output); + $xfer += $iter893->write($output); } } $output->writeListEnd(); @@ -28668,14 +29013,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size887 = 0; - $_etype890 = 0; - $xfer += $input->readListBegin($_etype890, $_size887); - for ($_i891 = 0; $_i891 < $_size887; ++$_i891) + $_size894 = 0; + $_etype897 = 0; + $xfer += $input->readListBegin($_etype897, $_size894); + for ($_i898 = 0; $_i898 < $_size894; ++$_i898) { - $elem892 = null; - $xfer += $input->readString($elem892); - $this->success []= $elem892; + $elem899 = null; + $xfer += $input->readString($elem899); + $this->success []= $elem899; } $xfer += $input->readListEnd(); } else { @@ -28711,9 +29056,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter893) + foreach ($this->success as $iter900) { - $xfer += $output->writeString($iter893); + $xfer += $output->writeString($iter900); } } $output->writeListEnd(); @@ -32187,14 +32532,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size894 = 0; - $_etype897 = 0; - $xfer += $input->readListBegin($_etype897, $_size894); - for ($_i898 = 0; $_i898 < $_size894; ++$_i898) + $_size901 = 0; + $_etype904 = 0; + $xfer += $input->readListBegin($_etype904, $_size901); + for ($_i905 = 0; $_i905 < $_size901; ++$_i905) { - $elem899 = null; - $xfer += $input->readString($elem899); - $this->success []= $elem899; + $elem906 = null; + $xfer += $input->readString($elem906); + $this->success []= $elem906; } $xfer += $input->readListEnd(); } else { @@ -32230,9 +32575,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter900) + foreach ($this->success as $iter907) { - $xfer += $output->writeString($iter900); + $xfer += $output->writeString($iter907); } } $output->writeListEnd(); @@ -33101,14 +33446,14 @@ class 
ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size901 = 0; - $_etype904 = 0; - $xfer += $input->readListBegin($_etype904, $_size901); - for ($_i905 = 0; $_i905 < $_size901; ++$_i905) + $_size908 = 0; + $_etype911 = 0; + $xfer += $input->readListBegin($_etype911, $_size908); + for ($_i912 = 0; $_i912 < $_size908; ++$_i912) { - $elem906 = null; - $xfer += $input->readString($elem906); - $this->success []= $elem906; + $elem913 = null; + $xfer += $input->readString($elem913); + $this->success []= $elem913; } $xfer += $input->readListEnd(); } else { @@ -33144,9 +33489,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter907) + foreach ($this->success as $iter914) { - $xfer += $output->writeString($iter907); + $xfer += $output->writeString($iter914); } } $output->writeListEnd(); @@ -33837,15 +34182,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size908 = 0; - $_etype911 = 0; - $xfer += $input->readListBegin($_etype911, $_size908); - for ($_i912 = 0; $_i912 < $_size908; ++$_i912) + $_size915 = 0; + $_etype918 = 0; + $xfer += $input->readListBegin($_etype918, $_size915); + for ($_i919 = 0; $_i919 < $_size915; ++$_i919) { - $elem913 = null; - $elem913 = new \metastore\Role(); - $xfer += $elem913->read($input); - $this->success []= $elem913; + $elem920 = null; + $elem920 = new \metastore\Role(); + $xfer += $elem920->read($input); + $this->success []= $elem920; } $xfer += $input->readListEnd(); } else { @@ -33881,9 +34226,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter914) + foreach ($this->success as $iter921) { - $xfer += $iter914->write($output); + $xfer += $iter921->write($output); } } $output->writeListEnd(); @@ -34545,14 +34890,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size915 = 0; - $_etype918 = 0; - $xfer += $input->readListBegin($_etype918, $_size915); - for ($_i919 = 0; $_i919 < $_size915; ++$_i919) + $_size922 = 0; + $_etype925 = 0; + $xfer += $input->readListBegin($_etype925, $_size922); + for ($_i926 = 0; $_i926 < $_size922; ++$_i926) { - $elem920 = null; - $xfer += $input->readString($elem920); - $this->group_names []= $elem920; + $elem927 = null; + $xfer += $input->readString($elem927); + $this->group_names []= $elem927; } $xfer += $input->readListEnd(); } else { @@ -34593,9 +34938,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter921) + foreach ($this->group_names as $iter928) { - $xfer += $output->writeString($iter921); + $xfer += $output->writeString($iter928); } } $output->writeListEnd(); @@ -34903,15 +35248,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size922 = 0; - $_etype925 = 0; - $xfer += $input->readListBegin($_etype925, $_size922); - for ($_i926 = 0; $_i926 < $_size922; ++$_i926) + $_size929 = 0; + $_etype932 = 0; + $xfer += $input->readListBegin($_etype932, $_size929); + for ($_i933 = 0; $_i933 < $_size929; ++$_i933) { - $elem927 = null; - $elem927 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem927->read($input); - $this->success []= $elem927; + $elem934 = 
null; + $elem934 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem934->read($input); + $this->success []= $elem934; } $xfer += $input->readListEnd(); } else { @@ -34947,9 +35292,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter928) + foreach ($this->success as $iter935) { - $xfer += $iter928->write($output); + $xfer += $iter935->write($output); } } $output->writeListEnd(); @@ -35581,14 +35926,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size929 = 0; - $_etype932 = 0; - $xfer += $input->readListBegin($_etype932, $_size929); - for ($_i933 = 0; $_i933 < $_size929; ++$_i933) + $_size936 = 0; + $_etype939 = 0; + $xfer += $input->readListBegin($_etype939, $_size936); + for ($_i940 = 0; $_i940 < $_size936; ++$_i940) { - $elem934 = null; - $xfer += $input->readString($elem934); - $this->group_names []= $elem934; + $elem941 = null; + $xfer += $input->readString($elem941); + $this->group_names []= $elem941; } $xfer += $input->readListEnd(); } else { @@ -35621,9 +35966,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter935) + foreach ($this->group_names as $iter942) { - $xfer += $output->writeString($iter935); + $xfer += $output->writeString($iter942); } } $output->writeListEnd(); @@ -35699,14 +36044,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size936 = 0; - $_etype939 = 0; - $xfer += $input->readListBegin($_etype939, $_size936); - for ($_i940 = 0; $_i940 < $_size936; ++$_i940) + $_size943 = 0; + $_etype946 = 0; + $xfer += $input->readListBegin($_etype946, $_size943); + for ($_i947 = 0; $_i947 < $_size943; ++$_i947) { - $elem941 = null; - $xfer += $input->readString($elem941); - $this->success []= $elem941; + $elem948 = null; + $xfer += $input->readString($elem948); + $this->success []= $elem948; } $xfer += $input->readListEnd(); } else { @@ -35742,9 +36087,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter942) + foreach ($this->success as $iter949) { - $xfer += $output->writeString($iter942); + $xfer += $output->writeString($iter949); } } $output->writeListEnd(); diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 407f73c..cc2e07f 100755 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -86,6 +86,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_partitions_by_names(string db_name, string tbl_name, names)') print(' void alter_partition(string db_name, string tbl_name, Partition new_part)') print(' void alter_partitions(string db_name, string tbl_name, new_parts)') + print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context)') print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)') print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)') print(' bool partition_name_has_valid_characters( part_vals, 
bool throw_exception)') @@ -598,6 +599,12 @@ elif cmd == 'alter_partitions': sys.exit(1) pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),)) +elif cmd == 'alter_partitions_with_environment_context': + if len(args) != 4: + print('alter_partitions_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + elif cmd == 'alter_partition_with_environment_context': if len(args) != 4: print('alter_partition_with_environment_context requires 4 args') diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 26d8a02..6153c3d 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -560,6 +560,16 @@ def alter_partitions(self, db_name, tbl_name, new_parts): """ pass + def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + """ + Parameters: + - db_name + - tbl_name + - new_parts + - environment_context + """ + pass + def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): """ Parameters: @@ -3484,6 +3494,45 @@ def recv_alter_partitions(self): raise result.o2 return + def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + """ + Parameters: + - db_name + - tbl_name + - new_parts + - environment_context + """ + self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context) + self.recv_alter_partitions_with_environment_context() + + def send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context): + self._oprot.writeMessageBegin('alter_partitions_with_environment_context', TMessageType.CALL, self._seqid) + args = alter_partitions_with_environment_context_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.new_parts = new_parts + args.environment_context = environment_context + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_alter_partitions_with_environment_context(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = alter_partitions_with_environment_context_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context): """ Parameters: @@ -6025,6 +6074,7 @@ def __init__(self, handler): self._processMap["get_partitions_by_names"] = Processor.process_get_partitions_by_names self._processMap["alter_partition"] = Processor.process_alter_partition self._processMap["alter_partitions"] = Processor.process_alter_partitions + self._processMap["alter_partitions_with_environment_context"] = Processor.process_alter_partitions_with_environment_context self._processMap["alter_partition_with_environment_context"] = Processor.process_alter_partition_with_environment_context self._processMap["rename_partition"] = Processor.process_rename_partition self._processMap["partition_name_has_valid_characters"] = Processor.process_partition_name_has_valid_characters @@ 
-7713,6 +7763,31 @@ def process_alter_partitions(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_alter_partitions_with_environment_context(self, seqid, iprot, oprot): + args = alter_partitions_with_environment_context_args() + args.read(iprot) + iprot.readMessageEnd() + result = alter_partitions_with_environment_context_result() + try: + self._handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except InvalidOperationException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("alter_partitions_with_environment_context", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_alter_partition_with_environment_context(self, seqid, iprot, oprot): args = alter_partition_with_environment_context_args() args.read(iprot) @@ -20995,6 +21070,200 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class alter_partitions_with_environment_context_args: + """ + Attributes: + - db_name + - tbl_name + - new_parts + - environment_context + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.LIST, 'new_parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 + (4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4 + ) + + def __init__(self, db_name=None, tbl_name=None, new_parts=None, environment_context=None,): + self.db_name = db_name + self.tbl_name = tbl_name + self.new_parts = new_parts + self.environment_context = environment_context + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.new_parts = [] + (_etype832, _size829) = iprot.readListBegin() + for _i833 in xrange(_size829): + _elem834 = Partition() + _elem834.read(iprot) + self.new_parts.append(_elem834) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.environment_context = EnvironmentContext() + self.environment_context.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('alter_partitions_with_environment_context_args') + if self.db_name is not None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.new_parts is not None: + oprot.writeFieldBegin('new_parts', TType.LIST, 3) + oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) + for iter835 in self.new_parts: + iter835.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.environment_context is not None: + oprot.writeFieldBegin('environment_context', TType.STRUCT, 4) + self.environment_context.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.db_name) + value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.new_parts) + value = (value * 31) ^ hash(self.environment_context) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class alter_partitions_with_environment_context_result: + """ + Attributes: + - o1 + - o2 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = InvalidOperationException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('alter_partitions_with_environment_context_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, 
other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class alter_partition_with_environment_context_args: """ Attributes: @@ -21226,10 +21495,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype832, _size829) = iprot.readListBegin() - for _i833 in xrange(_size829): - _elem834 = iprot.readString() - self.part_vals.append(_elem834) + (_etype839, _size836) = iprot.readListBegin() + for _i840 in xrange(_size836): + _elem841 = iprot.readString() + self.part_vals.append(_elem841) iprot.readListEnd() else: iprot.skip(ftype) @@ -21260,8 +21529,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter835 in self.part_vals: - oprot.writeString(iter835) + for iter842 in self.part_vals: + oprot.writeString(iter842) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -21403,10 +21672,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype839, _size836) = iprot.readListBegin() - for _i840 in xrange(_size836): - _elem841 = iprot.readString() - self.part_vals.append(_elem841) + (_etype846, _size843) = iprot.readListBegin() + for _i847 in xrange(_size843): + _elem848 = iprot.readString() + self.part_vals.append(_elem848) iprot.readListEnd() else: iprot.skip(ftype) @@ -21428,8 +21697,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter842 in self.part_vals: - oprot.writeString(iter842) + for iter849 in self.part_vals: + oprot.writeString(iter849) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -21787,10 +22056,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype846, _size843) = iprot.readListBegin() - for _i847 in xrange(_size843): - _elem848 = iprot.readString() - self.success.append(_elem848) + (_etype853, _size850) = iprot.readListBegin() + for _i854 in xrange(_size850): + _elem855 = iprot.readString() + self.success.append(_elem855) iprot.readListEnd() else: iprot.skip(ftype) @@ -21813,8 +22082,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter849 in self.success: - oprot.writeString(iter849) + for iter856 in self.success: + oprot.writeString(iter856) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21938,11 +22207,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype851, _vtype852, _size850 ) = iprot.readMapBegin() - for _i854 in xrange(_size850): - _key855 = iprot.readString() - _val856 = iprot.readString() - self.success[_key855] = _val856 + (_ktype858, _vtype859, _size857 ) = iprot.readMapBegin() + for _i861 in xrange(_size857): + _key862 = iprot.readString() + _val863 = iprot.readString() + self.success[_key862] = _val863 iprot.readMapEnd() else: iprot.skip(ftype) @@ -21965,9 +22234,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter857,viter858 in self.success.items(): - oprot.writeString(kiter857) - oprot.writeString(viter858) + for kiter864,viter865 in self.success.items(): + 
oprot.writeString(kiter864) + oprot.writeString(viter865) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22043,11 +22312,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype860, _vtype861, _size859 ) = iprot.readMapBegin() - for _i863 in xrange(_size859): - _key864 = iprot.readString() - _val865 = iprot.readString() - self.part_vals[_key864] = _val865 + (_ktype867, _vtype868, _size866 ) = iprot.readMapBegin() + for _i870 in xrange(_size866): + _key871 = iprot.readString() + _val872 = iprot.readString() + self.part_vals[_key871] = _val872 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22077,9 +22346,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter866,viter867 in self.part_vals.items(): - oprot.writeString(kiter866) - oprot.writeString(viter867) + for kiter873,viter874 in self.part_vals.items(): + oprot.writeString(kiter873) + oprot.writeString(viter874) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -22293,11 +22562,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype869, _vtype870, _size868 ) = iprot.readMapBegin() - for _i872 in xrange(_size868): - _key873 = iprot.readString() - _val874 = iprot.readString() - self.part_vals[_key873] = _val874 + (_ktype876, _vtype877, _size875 ) = iprot.readMapBegin() + for _i879 in xrange(_size875): + _key880 = iprot.readString() + _val881 = iprot.readString() + self.part_vals[_key880] = _val881 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22327,9 +22596,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter875,viter876 in self.part_vals.items(): - oprot.writeString(kiter875) - oprot.writeString(viter876) + for kiter882,viter883 in self.part_vals.items(): + oprot.writeString(kiter882) + oprot.writeString(viter883) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -23384,11 +23653,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype880, _size877) = iprot.readListBegin() - for _i881 in xrange(_size877): - _elem882 = Index() - _elem882.read(iprot) - self.success.append(_elem882) + (_etype887, _size884) = iprot.readListBegin() + for _i888 in xrange(_size884): + _elem889 = Index() + _elem889.read(iprot) + self.success.append(_elem889) iprot.readListEnd() else: iprot.skip(ftype) @@ -23417,8 +23686,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter883 in self.success: - iter883.write(oprot) + for iter890 in self.success: + iter890.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23573,10 +23842,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype887, _size884) = iprot.readListBegin() - for _i888 in xrange(_size884): - _elem889 = iprot.readString() - self.success.append(_elem889) + (_etype894, _size891) = iprot.readListBegin() + for _i895 in xrange(_size891): + _elem896 = iprot.readString() + self.success.append(_elem896) iprot.readListEnd() else: iprot.skip(ftype) @@ -23599,8 +23868,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRING, len(self.success)) - for iter890 in self.success: - oprot.writeString(iter890) + for iter897 in self.success: + oprot.writeString(iter897) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -26148,10 +26417,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype894, _size891) = iprot.readListBegin() - for _i895 in xrange(_size891): - _elem896 = iprot.readString() - self.success.append(_elem896) + (_etype901, _size898) = iprot.readListBegin() + for _i902 in xrange(_size898): + _elem903 = iprot.readString() + self.success.append(_elem903) iprot.readListEnd() else: iprot.skip(ftype) @@ -26174,8 +26443,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter897 in self.success: - oprot.writeString(iter897) + for iter904 in self.success: + oprot.writeString(iter904) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26863,10 +27132,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype901, _size898) = iprot.readListBegin() - for _i902 in xrange(_size898): - _elem903 = iprot.readString() - self.success.append(_elem903) + (_etype908, _size905) = iprot.readListBegin() + for _i909 in xrange(_size905): + _elem910 = iprot.readString() + self.success.append(_elem910) iprot.readListEnd() else: iprot.skip(ftype) @@ -26889,8 +27158,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter904 in self.success: - oprot.writeString(iter904) + for iter911 in self.success: + oprot.writeString(iter911) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27404,11 +27673,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype908, _size905) = iprot.readListBegin() - for _i909 in xrange(_size905): - _elem910 = Role() - _elem910.read(iprot) - self.success.append(_elem910) + (_etype915, _size912) = iprot.readListBegin() + for _i916 in xrange(_size912): + _elem917 = Role() + _elem917.read(iprot) + self.success.append(_elem917) iprot.readListEnd() else: iprot.skip(ftype) @@ -27431,8 +27700,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter911 in self.success: - iter911.write(oprot) + for iter918 in self.success: + iter918.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27941,10 +28210,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype915, _size912) = iprot.readListBegin() - for _i916 in xrange(_size912): - _elem917 = iprot.readString() - self.group_names.append(_elem917) + (_etype922, _size919) = iprot.readListBegin() + for _i923 in xrange(_size919): + _elem924 = iprot.readString() + self.group_names.append(_elem924) iprot.readListEnd() else: iprot.skip(ftype) @@ -27969,8 +28238,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter918 in self.group_names: - oprot.writeString(iter918) + for iter925 in self.group_names: + oprot.writeString(iter925) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28197,11 +28466,11 @@ def read(self, iprot): if 
fid == 0: if ftype == TType.LIST: self.success = [] - (_etype922, _size919) = iprot.readListBegin() - for _i923 in xrange(_size919): - _elem924 = HiveObjectPrivilege() - _elem924.read(iprot) - self.success.append(_elem924) + (_etype929, _size926) = iprot.readListBegin() + for _i930 in xrange(_size926): + _elem931 = HiveObjectPrivilege() + _elem931.read(iprot) + self.success.append(_elem931) iprot.readListEnd() else: iprot.skip(ftype) @@ -28224,8 +28493,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter925 in self.success: - iter925.write(oprot) + for iter932 in self.success: + iter932.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28723,10 +28992,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype929, _size926) = iprot.readListBegin() - for _i930 in xrange(_size926): - _elem931 = iprot.readString() - self.group_names.append(_elem931) + (_etype936, _size933) = iprot.readListBegin() + for _i937 in xrange(_size933): + _elem938 = iprot.readString() + self.group_names.append(_elem938) iprot.readListEnd() else: iprot.skip(ftype) @@ -28747,8 +29016,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter932 in self.group_names: - oprot.writeString(iter932) + for iter939 in self.group_names: + oprot.writeString(iter939) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28803,10 +29072,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype936, _size933) = iprot.readListBegin() - for _i937 in xrange(_size933): - _elem938 = iprot.readString() - self.success.append(_elem938) + (_etype943, _size940) = iprot.readListBegin() + for _i944 in xrange(_size940): + _elem945 = iprot.readString() + self.success.append(_elem945) iprot.readListEnd() else: iprot.skip(ftype) @@ -28829,8 +29098,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter939 in self.success: - oprot.writeString(iter939) + for iter946 in self.success: + oprot.writeString(iter946) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 9b9a27c..0e75245 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1070,6 +1070,22 @@ module ThriftHiveMetastore return end + def alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context) + send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context) + recv_alter_partitions_with_environment_context() + end + + def send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context) + send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts, :environment_context => environment_context) + end + + def recv_alter_partitions_with_environment_context() + result = receive_message(Alter_partitions_with_environment_context_result) + raise result.o1 unless result.o1.nil? 
+ raise result.o2 unless result.o2.nil? + return + end + def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context) send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context) recv_alter_partition_with_environment_context() @@ -3076,6 +3092,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'alter_partitions', seqid) end + def process_alter_partitions_with_environment_context(seqid, iprot, oprot) + args = read_args(iprot, Alter_partitions_with_environment_context_args) + result = Alter_partitions_with_environment_context_result.new() + begin + @handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context) + rescue ::InvalidOperationException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'alter_partitions_with_environment_context', seqid) + end + def process_alter_partition_with_environment_context(seqid, iprot, oprot) args = read_args(iprot, Alter_partition_with_environment_context_args) result = Alter_partition_with_environment_context_result.new() @@ -6362,6 +6391,46 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Alter_partitions_with_environment_context_args + include ::Thrift::Struct, ::Thrift::Struct_Union + DB_NAME = 1 + TBL_NAME = 2 + NEW_PARTS = 3 + ENVIRONMENT_CONTEXT = 4 + + FIELDS = { + DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, + ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Alter_partitions_with_environment_context_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Alter_partition_with_environment_context_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java index b565304..dedd449 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -33,29 +34,6 @@ public interface AlterHandler extends Configurable { /** - * handles alter table - * - * @param msdb - * object to get metadata - * @param wh - * TODO - * @param dbname - * database of the table being altered - * @param name - * original name of the table being altered. 
same as - * newTable.tableName if alter op is not a rename. - * @param newTable - * new table object - * @throws InvalidOperationException - * thrown if the newTable object is invalid - * @throws MetaException - * thrown if there is any other error - */ - public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, - String name, Table newTable) throws InvalidOperationException, - MetaException; - - /** * handles alter table, the changes could be cascaded to partitions if applicable * * @param msdb @@ -77,7 +55,7 @@ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, * thrown if there is any other error */ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, - String name, Table newTable, boolean cascade) throws InvalidOperationException, + String name, Table newTable, EnvironmentContext envContext) throws InvalidOperationException, MetaException; /** @@ -101,7 +79,7 @@ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, * @throws MetaException */ public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List part_vals, final Partition new_part) + final String name, final List part_vals, final Partition new_part, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; @@ -124,7 +102,7 @@ public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, fina * @throws MetaException */ public abstract List alterPartitions(final RawStore msdb, Warehouse wh, - final String dbname, final String name, final List new_part) + final String dbname, final String name, final List new_part, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 628c37d..016926b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; import com.google.common.collect.Lists; + import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -26,11 +27,13 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -71,13 +74,11 @@ public void setConf(Configuration conf) { @Override public void alterTable(RawStore msdb, Warehouse wh, String dbname, - String name, Table newt) throws InvalidOperationException, MetaException { - alterTable(msdb, wh, dbname, name, newt, false); - } - - @Override - public void alterTable(RawStore msdb, Warehouse wh, String dbname, - 
String name, Table newt, boolean cascade) throws InvalidOperationException, MetaException {
+      String name, Table newt, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException {
+    final boolean cascade = environmentContext != null
+        && environmentContext.isSetProperties()
+        && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
+            StatsSetupConst.CASCADE));
     if (newt == null) {
       throw new InvalidOperationException("New table is invalid: " + newt);
     }
@@ -229,12 +230,12 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
           msdb.alterPartition(dbname, name, part.getValues(), part);
         }
       }
-    } else if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt) &&
+    } else if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt, environmentContext) &&
         (newt.getPartitionKeysSize() == 0)) {
       Database db = msdb.getDatabase(newt.getDbName());
       // Update table stats. For partitioned table, we update stats in
       // alterPartition()
-      MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true);
+      MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext);
     }
     updateTableColumnStatsForAlterTable(msdb, oldt, newt);
     // now finally call alter table
@@ -318,7 +319,7 @@ String getSimpleMessage(IOException ex) {
   }
 
   @Override
   public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
-      final String name, final List<String> part_vals, final Partition new_part)
+      final String name, final List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     boolean success = false;
@@ -344,8 +345,8 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
     if (part_vals == null || part_vals.size() == 0) {
       try {
         oldPart = msdb.getPartition(dbname, name, new_part.getValues());
-        if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl)) {
-          MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true);
+        if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
+          MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
         }
         updatePartColumnStats(msdb, dbname, name, new_part.getValues(), new_part);
         msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
@@ -436,8 +437,8 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
             + tbl.getTableName() + " " + new_part.getValues());
       }
       new_part.getSd().setLocation(newPartLoc);
-      if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl)) {
-        MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true);
+      if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
+        MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
       }
       String oldPartName = Warehouse.makePartName(tbl.getPartitionKeys(), oldPart.getValues());
       try {
@@ -492,7 +493,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
 
   @Override
   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
-      final String name, final List<Partition> new_parts)
+      final String name, final List<Partition> new_parts, EnvironmentContext environmentContext)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     List<Partition> oldParts = new ArrayList<Partition>();
@@ -512,8 +513,8 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final
String oldParts.add(oldTmpPart); partValsList.add(tmpPart.getValues()); - if (MetaStoreUtils.requireCalStats(hiveConf, oldTmpPart, tmpPart, tbl)) { - MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true); + if (MetaStoreUtils.requireCalStats(hiveConf, oldTmpPart, tmpPart, tbl, environmentContext)) { + MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true, environmentContext); } updatePartColumnStats(msdb, dbname, name, oldTmpPart.getValues(), tmpPart); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index dde253a..2c8f8a2 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JvmPauseMonitor; import org.apache.hadoop.hive.common.LogUtils; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; @@ -1477,7 +1478,7 @@ private void create_table_core(final RawStore ms, final Table tbl, } if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && !MetaStoreUtils.isView(tbl)) { - MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir); + MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext); } // set create time @@ -2084,7 +2085,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && !MetaStoreUtils.isView(tbl)) { - MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir); + MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, envContext); } success = ms.addPartition(part); @@ -2512,7 +2513,7 @@ private void initializeAddedPartition( final Table tbl, final PartitionSpecProxy.PartitionIterator part, boolean madeDir) throws MetaException { if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && !MetaStoreUtils.isView(tbl)) { - MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false); + MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false, null); } // set create time @@ -3360,7 +3361,7 @@ private void rename_partition(final String db_name, final String tbl_name, partitionValidationPattern); } - oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part); + oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part, envContext); // Only fetch the table if we actually have a listener Table table = null; @@ -3398,7 +3399,14 @@ private void rename_partition(final String db_name, final String tbl_name, @Override public void alter_partitions(final String db_name, final String tbl_name, - final List new_parts) + final List new_parts) throws InvalidOperationException, MetaException, + TException { + alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null); + } + + @Override + public void alter_partitions_with_environment_context(final String db_name, final String tbl_name, + final List new_parts, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { @@ -3417,7 +3425,7 @@ public void alter_partitions(final String db_name, final String tbl_name, 
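The HiveMetaStore hunk above shows the backward-compatibility pattern this patch applies throughout: the legacy alter_partitions entry point now simply delegates to the new _with_environment_context variant with a null context. A minimal sketch of that shape, under the assumption of Java 8 default methods; AlterPartitionsOps is an illustrative interface, not the real HMSHandler:

import java.util.List;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

// Illustrative only: the delegation shape that keeps the pre-existing
// Thrift entry points source- and wire-compatible.
interface AlterPartitionsOps {
  void alterPartitionsWithContext(String db, String tbl, List<Partition> parts,
      EnvironmentContext ctx) throws TException;

  // Legacy signature: delegates with a null context, which downstream code
  // treats as a user-initiated alter and therefore invalidates basic stats.
  default void alterPartitions(String db, String tbl, List<Partition> parts)
      throws TException {
    alterPartitionsWithContext(db, tbl, parts, null);
  }
}

Old clients keep working unchanged; only callers that care about stats accuracy or cascade need to supply a context.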
      for (Partition tmpPart : new_parts) {
        firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
      }
-      oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts);
+      oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts, environmentContext);
       Iterator<Partition> olditr = oldParts.iterator();
       // Only fetch the table if we have a listener that needs it.
@@ -3514,15 +3522,19 @@ public void alter_table(final String dbname, final String name,
         final Table newTable)
         throws InvalidOperationException, MetaException {
       // Do not set an environment context.
-      alter_table_core(dbname,name, newTable, null, false);
+      alter_table_core(dbname,name, newTable, null);
     }
 
     @Override
     public void alter_table_with_cascade(final String dbname, final String name,
         final Table newTable, final boolean cascade)
         throws InvalidOperationException, MetaException {
-      // Do not set an environment context.
-      alter_table_core(dbname,name, newTable, null, cascade);
+      EnvironmentContext envContext = null;
+      if (cascade) {
+        envContext = new EnvironmentContext();
+        envContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+      }
+      alter_table_core(dbname, name, newTable, envContext);
     }
 
     @Override
@@ -3530,15 +3542,14 @@ public void alter_table_with_environment_context(final String dbname,
         final String name, final Table newTable,
         final EnvironmentContext envContext)
         throws InvalidOperationException, MetaException {
-      alter_table_core(dbname, name, newTable, envContext, false);
+      alter_table_core(dbname, name, newTable, envContext);
     }
 
     private void alter_table_core(final String dbname, final String name, final Table newTable,
-        final EnvironmentContext envContext, final boolean cascade)
+        final EnvironmentContext envContext)
         throws InvalidOperationException, MetaException {
       startFunction("alter_table", ": db=" + dbname + " tbl=" + name
           + " newtbl=" + newTable.getTableName());
-
       // Update the time if it hasn't been specified.
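Between the alter_table_with_cascade shim above (which encodes the flag) and the HiveAlterHandler hunk earlier (which decodes it), a cascading ALTER TABLE is now requested by putting CASCADE=true into the request's EnvironmentContext instead of passing a dedicated boolean. A self-contained sketch of the decoding predicate, assuming the hive-metastore API jar on the classpath; the string literals stand in for StatsSetupConst.CASCADE and StatsSetupConst.TRUE:

import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

public class CascadeFlagDemo {
  // Mirrors the predicate HiveAlterHandler.alterTable now uses to decide
  // whether an ALTER TABLE should cascade to partitions.
  static boolean isCascade(EnvironmentContext ctx) {
    return ctx != null && ctx.isSetProperties()
        && "true".equals(ctx.getProperties().get("CASCADE"));
  }

  public static void main(String[] args) {
    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties("CASCADE", "true");
    System.out.println(isCascade(ctx));                      // true
    System.out.println(isCascade(new EnvironmentContext())); // false: no properties set
    System.out.println(isCascade(null));                     // false: legacy callers
  }
}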
if (newTable.getParameters() == null || newTable.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) { @@ -3550,7 +3561,7 @@ private void alter_table_core(final String dbname, final String name, final Tabl try { Table oldt = get_table_core(dbname, name); firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); - alterHandler.alterTable(getMS(), wh, dbname, name, newTable, cascade); + alterHandler.alterTable(getMS(), wh, dbname, name, newTable, envContext); success = true; for (MetaStoreEventListener listener : listeners) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 0c30262..bd83a3a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; @@ -367,16 +368,10 @@ public void reconnect() throws MetaException { @Override public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, TException { - alter_table(dbname, tbl_name, new_tbl, null); + alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); } - @Override - public void alter_table(String dbname, String tbl_name, Table new_tbl, boolean cascade) - throws InvalidOperationException, MetaException, TException { - client.alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); - } - - public void alter_table(String dbname, String tbl_name, Table new_tbl, + public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); } @@ -1412,15 +1407,15 @@ public int getNumPartitionsByFilter(String db_name, String tbl_name, } @Override - public void alter_partition(String dbName, String tblName, Partition newPart) + public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { - client.alter_partition(dbName, tblName, newPart); + client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); } @Override - public void alter_partitions(String dbName, String tblName, List newParts) + public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { - client.alter_partitions(dbName, tblName, newParts); + client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); } @Override diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 4284d54..a41772f 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -33,6 +33,7 @@ import 
org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FireEventRequest;
 import org.apache.hadoop.hive.metastore.api.FireEventResponse;
@@ -680,9 +681,10 @@ void createTable(Table tbl) throws AlreadyExistsException,
   void alter_table(String defaultDatabaseName, String tblName, Table table)
       throws InvalidOperationException, MetaException, TException;
 
-  //alter_table_with_cascade
-  void alter_table(String defaultDatabaseName, String tblName, Table table,
-      boolean cascade) throws InvalidOperationException, MetaException, TException;
+  // supersedes alter_table_with_cascade: cascade is now carried in the EnvironmentContext
+  void alter_table_with_environmentContext(String defaultDatabaseName, String tblName, Table table,
+      EnvironmentContext environmentContext) throws InvalidOperationException, MetaException,
+      TException;
 
   void createDatabase(Database db)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
@@ -768,7 +770,7 @@ boolean dropPartition(String db_name, String tbl_name,
    * @throws TException
    *           if error in communicating with metastore server
    */
-  void alter_partition(String dbName, String tblName, Partition newPart)
+  void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException;
 
   /**
@@ -787,7 +789,7 @@ void alter_partition(String dbName, String tblName, Partition newPart)
    * @throws TException
    *           if error in communicating with metastore server
    */
-  void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+  void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException;
 
   /**
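With IMetaStoreClient reshaped as above, every caller passes an explicit context (or null). A hedged sketch of typical call sites against the revised interface; `client` is assumed to be an already-connected metastore client, and the string literals mirror StatsSetupConst.STATS_GENERATED and StatsSetupConst.TASK:

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public class AlterCallSites {
  static void examples(IMetaStoreClient client, Table tbl, Partition part,
      List<Partition> parts) throws TException {
    // Plain DDL alters: no context, so BASIC_STATS is marked inaccurate.
    client.alter_table_with_environmentContext("default", "t", tbl, null);
    client.alter_partition("default", "t", part, null);

    // Alter issued on behalf of a stats task: mark stats as freshly computed.
    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties("STATS_GENERATED", "TASK");
    client.alter_partitions("default", "t", parts, ctx);
  }
}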
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index c8859f3..866e1c3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -179,16 +180,16 @@ public static boolean containsAllFastStats(Map<String, String> partParams) {
   }
 
   public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
-      boolean madeDir) throws MetaException {
-    return updateTableStatsFast(db, tbl, wh, madeDir, false);
+      boolean madeDir, EnvironmentContext environmentContext) throws MetaException {
+    return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext);
   }
 
   public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
-      boolean madeDir, boolean forceRecompute) throws MetaException {
+      boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
     if (tbl.getPartitionKeysSize() == 0) {
       // Update stats only when unpartitioned
       FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
-      return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute);
+      return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext);
     } else {
       return false;
     }
@@ -204,8 +205,8 @@ public static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
    *          these parameters set
    * @return true if the stats were updated, false otherwise
    */
-  public static boolean updateTableStatsFast(Table tbl,
-      FileStatus[] fileStatus, boolean newDir, boolean forceRecompute) throws MetaException {
+  public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
+      boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
     Map<String, String> params = tbl.getParameters();
@@ -231,12 +232,13 @@ public static boolean updateTableStatsFast(Table tbl,
       LOG.info("Updating table stats fast for " + tbl.getTableName());
       populateQuickStats(fileStatus, params);
       LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
-      if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
-        // invalidate stats requiring scan since this is a regular ddl alter case
-        StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
-      } else {
-        params.remove(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK);
+      if (environmentContext != null
+          && environmentContext.isSetProperties()
+          && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
+              StatsSetupConst.STATS_GENERATED))) {
         StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
+      } else {
+        StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
       }
     }
     tbl.setParameters(params);
@@ -261,7 +263,7 @@ public static void populateQuickStats(FileStatus[] fileStatus, Map
     params = part.getParameters();
     boolean updated = false;
     if (forceRecompute ||
@@ -349,12 +354,13 @@ public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionItera
       FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
       populateQuickStats(fileStatus, params);
       LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
-      if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) {
-        // invalidate stats requiring scan since this is a regular ddl alter case
-        StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
-      } else {
-        params.remove(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK);
+      if (environmentContext != null
+          && environmentContext.isSetProperties()
+          && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
+              StatsSetupConst.STATS_GENERATED))) {
         StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
+      } else {
+        StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
       }
     }
     part.setParameters(params);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 2e45913..96b801b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 2e45913..96b801b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -1102,7 +1103,7 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD
     tbl.getTTable().setPartitionKeys(newPartitionKeys);

     try {
-      db.alterTable(tabName, tbl);
+      db.alterTable(tabName, tbl, null);
     } catch (InvalidOperationException e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "Unable to alter " + tabName);
     }
@@ -1130,7 +1131,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc)

     if (touchDesc.getPartSpec() == null) {
       try {
-        db.alterTable(touchDesc.getTableName(), tbl);
+        db.alterTable(touchDesc.getTableName(), tbl, null);
       } catch (InvalidOperationException e) {
         throw new HiveException("Unable to update table");
       }
@@ -1142,7 +1143,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc)
         throw new HiveException("Specified partition does not exist");
       }
       try {
-        db.alterPartition(touchDesc.getTableName(), part);
+        db.alterPartition(touchDesc.getTableName(), part, null);
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
       }
@@ -1491,7 +1492,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
             authority.toString(), harPartitionDir.getPath()); // make in Path to ensure no slash at the end
         setArchived(p, harPath, partSpecInfo.values.size());
-        db.alterPartition(simpleDesc.getTableName(), p);
+        db.alterPartition(simpleDesc.getTableName(), p, null);
       }
     } catch (Exception e) {
       throw new HiveException("Unable to change the partition info for HAR", e);
@@ -1697,7 +1698,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
     for(Partition p: partitions) {
       setUnArchived(p);
       try {
-        db.alterPartition(simpleDesc.getTableName(), p);
+        db.alterPartition(simpleDesc.getTableName(), p, null);
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
       }
@@ -3230,9 +3231,9 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
     try {
       if (allPartitions == null) {
-        db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade());
+        db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), alterTbl.getEnvironmentContext());
       } else {
-        db.alterPartitions(tbl.getTableName(), allPartitions);
+        db.alterPartitions(tbl.getTableName(), allPartitions, alterTbl.getEnvironmentContext());
       }
     } catch (InvalidOperationException e) {
       LOG.error("alter table: " + stringifyException(e));
@@ -3419,11 +3420,19 @@ private int alterTableOrSinglePartition(AlterTableDesc alterTbl, Table tbl, Part
       }
       sd.setCols(alterTbl.getNewCols());
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
-      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
+      if (part != null) {
+        part.getTPartition().getParameters().putAll(alterTbl.getProps());
+      } else {
+        tbl.getTTable().getParameters().putAll(alterTbl.getProps());
+      }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) {
       Iterator<String> keyItr = alterTbl.getProps().keySet().iterator();
       while (keyItr.hasNext()) {
-        tbl.getTTable().getParameters().remove(keyItr.next());
+        if (part != null) {
+          part.getTPartition().getParameters().remove(keyItr.next());
+        } else {
+          tbl.getTTable().getParameters().remove(keyItr.next());
+        }
       }
     } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
       StorageDescriptor sd = (part == null ? tbl.getTTable().getSd() : part.getTPartition().getSd());
@@ -4052,7 +4061,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
     if (crtTbl.getReplaceMode()){
       // replace-mode creates are really alters using CreateTableDesc.
       try {
-        db.alterTable(tbl.getDbName()+"."+tbl.getTableName(),tbl);
+        db.alterTable(tbl.getDbName()+"."+tbl.getTableName(),tbl,null);
       } catch (InvalidOperationException e) {
         throw new HiveException("Unable to alter table. " + e.getMessage(), e);
       }
@@ -4232,7 +4241,7 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
       oldview.setPartCols(crtView.getPartCols());
       oldview.checkValidity(null);
       try {
-        db.alterTable(crtView.getViewName(), oldview);
+        db.alterTable(crtView.getViewName(), oldview, null);
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
       }
@@ -4332,32 +4341,33 @@ private int exchangeTablePartition(Hive db,
   private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec)
       throws HiveException, InvalidOperationException {
     List<Path> locations = new ArrayList<Path>();
+    EnvironmentContext environmentContext = new EnvironmentContext(); // created eagerly so the marker set in needToUpdateStats is visible to the alter calls below
     if (partSpec == null) {
       if (table.isPartitioned()) {
         for (Partition partition : db.getPartitions(table)) {
           locations.add(partition.getDataLocation());
-          if (needToUpdateStats(partition.getParameters())) {
-            db.alterPartition(table.getDbName(), table.getTableName(), partition);
+          if (needToUpdateStats(partition.getParameters(), environmentContext)) {
+            db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
           }
         }
       } else {
         locations.add(table.getPath());
-        if (needToUpdateStats(table.getParameters())) {
-          db.alterTable(table.getDbName()+"."+table.getTableName(), table);
+        if (needToUpdateStats(table.getParameters(), environmentContext)) {
+          db.alterTable(table.getDbName()+"."+table.getTableName(), table, environmentContext);
         }
       }
     } else {
       for (Partition partition : db.getPartitionsByNames(table, partSpec)) {
         locations.add(partition.getDataLocation());
-        if (needToUpdateStats(partition.getParameters())) {
-          db.alterPartition(table.getDbName(), table.getTableName(), partition);
+        if (needToUpdateStats(partition.getParameters(), environmentContext)) {
+          db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
         }
       }
     }
     return locations;
   }

-  private boolean needToUpdateStats(Map<String, String> props) {
+  private boolean needToUpdateStats(Map<String, String> props, EnvironmentContext environmentContext) {
     if (null == props) {
       return false;
     }
@@ -4372,7 +4382,10 @@ private boolean needToUpdateStats(Map<String, String> props) {
     }
     //first set basic stats to true
     StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE);
-    props.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
+    if (environmentContext == null) {
+      environmentContext = new EnvironmentContext();
+    }
+    environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
     //then invalidate column stats
     StatsSetupConst.clearColumnStatsState(props);
     return statsPresent;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index e9cd450..bd1f33f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -601,7 +601,7 @@ private void updatePartitionBucketSortColumns(Table table, Partition partn,
     }

     if (updateBucketCols || updateSortCols) {
-      db.alterPartition(table.getDbName(), table.getTableName(), partn);
+      db.alterPartition(table.getDbName(), table.getTableName(), partn, null);
     }
   }

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
index 14eacdf..b013b93 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsNoJobTask.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.DriverContext;
@@ -73,6 +74,7 @@ private static final long serialVersionUID = 1L;
   private static transient final Logger LOG = LoggerFactory.getLogger(StatsNoJobTask.class);
   private static ConcurrentMap<String, Partition> partUpdates;
+  private static EnvironmentContext environmentContext;
   private static Table table;
   private static String tableFullName;
   private static JobConf jc = null;
@@ -104,6 +106,7 @@
           new ThreadFactoryBuilder().setDaemon(true).setNameFormat("StatsNoJobTask-Thread-%d")
               .build());
       partUpdates = new MapMaker().concurrencyLevel(numThreads).makeMap();
+      environmentContext = new EnvironmentContext();
       LOG.info("Initialized threadpool for stats computation with " + numThreads + " threads");
     } catch (HiveException e) {
       LOG.error("Cannot get table " + tableName, e);
@@ -174,7 +177,10 @@ public void run() {
           parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize));
           parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize));
           parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles));
-          parameters.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
+          if (!environmentContext.isSetProperties()
+              || !environmentContext.getProperties().containsKey(StatsSetupConst.STATS_GENERATED)) {
+            environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+          }

           partUpdates.put(tPart.getSd().getLocation(), new Partition(table, tPart));
@@ -270,9 +276,10 @@
         parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(rawDataSize));
         parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(fileSize));
         parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(numFiles));
-        parameters.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
+        EnvironmentContext environmentContext = new EnvironmentContext();
+        environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);

-        db.alterTable(tableFullName, new Table(tTable));
+        db.alterTable(tableFullName, new Table(tTable), environmentContext);

         String msg = "Table " + tableFullName + " stats: [" + toString(parameters) + ']';
         LOG.debug(msg);
@@ -319,7 +326,7 @@ private int updatePartitions() throws InvalidOperationException, HiveException {
         return -1;
       } else {
         LOG.debug("Bulk updating partitions..");
-        db.alterPartitions(tableFullName, Lists.newArrayList(partUpdates.values()));
+        db.alterPartitions(tableFullName, Lists.newArrayList(partUpdates.values()), environmentContext);
         LOG.debug("Bulk updated " + partUpdates.values().size() + " partitions.");
       }
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 44277aa..28bc9a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.DriverContext;
@@ -136,6 +137,7 @@ private int aggregateStats() {
     StatsAggregator statsAggregator = null;
     int ret = 0;
     StatsCollectionContext scc = null;
+    EnvironmentContext environmentContext = null;
     try {
       // Stats setup:
       Warehouse wh = new Warehouse(conf);
@@ -179,10 +181,11 @@
       // write table stats to metastore
       if (!getWork().getNoStatsAggregator()) {
-        parameters.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
+        environmentContext = new EnvironmentContext();
+        environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
       }

-      db.alterTable(tableFullName, new Table(tTable));
+      db.alterTable(tableFullName, new Table(tTable), environmentContext);
       if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
         console.printInfo("Table " + tableFullName + " stats: [" + toString(parameters) + ']');
       }
@@ -216,7 +219,9 @@
           updateQuickStats(wh, parameters, tPart.getSd());

           if (!getWork().getNoStatsAggregator()) {
-            parameters.put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
+            environmentContext = new EnvironmentContext();
+            environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED,
+                StatsSetupConst.TASK);
           }

           updates.add(new Partition(table, tPart));
@@ -228,7 +233,7 @@
             " stats: [" + toString(parameters) + ']');
         }
         if (!updates.isEmpty()) {
-          db.alterPartitions(tableFullName, updates);
+          db.alterPartitions(tableFullName, updates, environmentContext);
         }
       }
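Together with the StatsNoJobTask change above, every stats-producing task now follows the same publish pattern. A condensed, hedged sketch (the surrounding scaffolding and the numRows variable are assumed) of what a task sends for a batch of partitions:

    // Task computes the numbers, then vouches for them via the context:
    Map<String, String> parameters = tPart.getParameters();
    parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
    db.alterPartitions(tableFullName, updates, ctx); // stats stay marked accurate downstream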
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
index 0e8807e..48f3b28 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
@@ -66,7 +66,7 @@ public void run(SessionState sess, Set<ReadEntity> inputs,
       case TABLE: {
         Table t = db.getTable(re.getTable().getTableName());
         t.setLastAccessTime(lastAccessTime);
-        db.alterTable(t.getDbName() + "." + t.getTableName(), t);
+        db.alterTable(t.getDbName() + "." + t.getTableName(), t, null);
         break;
       }
       case PARTITION: {
@@ -74,9 +74,9 @@ public void run(SessionState sess, Set<ReadEntity> inputs,
         Table t = db.getTable(p.getTable().getTableName());
         p = db.getPartition(t, p.getSpec(), false);
         p.setLastAccessTime(lastAccessTime);
-        db.alterPartition(t.getTableName(), p);
+        db.alterPartition(t.getTableName(), p, null);
         t.setLastAccessTime(lastAccessTime);
-        db.alterTable(t.getDbName() + "." + t.getTableName(), t);
+        db.alterTable(t.getDbName() + "." + t.getTableName(), t, null);
         break;
       }
       default:
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
index eeb343b..02f7f55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
@@ -71,13 +71,13 @@ protected int execute(DriverContext driverContext) {
         FileStatus fstat = fs.getFileStatus(path);
         part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
-        db.alterPartition(tbl.getTableName(), part);
+        db.alterPartition(tbl.getTableName(), part, null);
       } else {
         Path url = new Path(tbl.getPath().toString());
         FileSystem fs = url.getFileSystem(conf);
         FileStatus fstat = fs.getFileStatus(url);
         tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
-        db.alterTable(tbl.getDbName() + "." + tbl.getTableName(), tbl);
+        db.alterTable(tbl.getDbName() + "." + tbl.getTableName(), tbl, null);
       }
     } catch (Exception e) {
       e.printStackTrace();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 0bab769..41abc95 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -72,6 +72,7 @@
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FireEventRequest;
 import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
@@ -549,12 +550,12 @@ public void createTable(String tableName, List<String> columns, List<String> par
    * if the changes in metadata is not acceptable
    * @throws TException
    */
-  public void alterTable(String tblName, Table newTbl)
+  public void alterTable(String tblName, Table newTbl, EnvironmentContext environmentContext)
       throws InvalidOperationException, HiveException {
-    alterTable(tblName, newTbl, false);
+    alterTable(tblName, newTbl, false, environmentContext);
   }

-  public void alterTable(String tblName, Table newTbl, boolean cascade)
+  public void alterTable(String tblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext)
       throws InvalidOperationException, HiveException {
     String[] names = Utilities.getDbTableName(tblName);
     try {
@@ -563,7 +564,13 @@
         newTbl.getParameters().remove(hive_metastoreConstants.DDL_TIME);
       }
       newTbl.checkValidity(conf);
-      getMSC().alter_table(names[0], names[1], newTbl.getTTable(), cascade);
+      if (environmentContext == null) {
+        environmentContext = new EnvironmentContext();
+      }
+      if (cascade) {
+        environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+      }
+      getMSC().alter_table_with_environmentContext(names[0], names[1], newTbl.getTTable(), environmentContext);
     } catch (MetaException e) {
       throw new HiveException("Unable to alter table. " + e.getMessage(), e);
" + e.getMessage(), e); } catch (TException e) { @@ -610,10 +617,10 @@ public void alterIndex(String dbName, String baseTblName, String idxName, Index * if the changes in metadata is not acceptable * @throws TException */ - public void alterPartition(String tblName, Partition newPart) + public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException { String[] names = Utilities.getDbTableName(tblName); - alterPartition(names[0], names[1], newPart); + alterPartition(names[0], names[1], newPart, environmentContext); } /** @@ -629,11 +636,11 @@ public void alterPartition(String tblName, Partition newPart) * if the changes in metadata is not acceptable * @throws TException */ - public void alterPartition(String dbName, String tblName, Partition newPart) + public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException { try { validatePartition(newPart); - getMSC().alter_partition(dbName, tblName, newPart.getTPartition()); + getMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext); } catch (MetaException e) { throw new HiveException("Unable to alter partition. " + e.getMessage(), e); @@ -661,7 +668,7 @@ private void validatePartition(Partition newPart) throws HiveException { * if the changes in metadata is not acceptable * @throws TException */ - public void alterPartitions(String tblName, List newParts) + public void alterPartitions(String tblName, List newParts, EnvironmentContext environmentContext) throws InvalidOperationException, HiveException { String[] names = Utilities.getDbTableName(tblName); List newTParts = @@ -674,7 +681,7 @@ public void alterPartitions(String tblName, List newParts) } newTParts.add(tmpPart.getTPartition()); } - getMSC().alter_partitions(names[0], names[1], newTParts); + getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext); } catch (MetaException e) { throw new HiveException("Unable to alter partition. 
" + e.getMessage(), e); } catch (TException e) { @@ -1518,7 +1525,7 @@ public Partition loadPartition(Path loadPath, Table tbl, MetaStoreUtils.populateQuickStats(HiveStatsUtils.getFileStatusRecurse(newPartPath, -1, newPartPath.getFileSystem(conf)), newTPart.getParameters()); getMSC().add_partition(newTPart.getTPartition()); } else { - alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newTPart.getTPartition())); + alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newTPart.getTPartition()), null); } return newTPart; } catch (IOException e) { @@ -1745,10 +1752,12 @@ public void loadTable(Path loadPath, String tableName, boolean replace, throw new HiveException("addFiles: filesystem error in check phase", e); } } - if(!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + EnvironmentContext environmentContext = null; + if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); - } else { - tbl.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, "true"); + } else { + environmentContext = new EnvironmentContext(); + environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK); } //column stats will be inaccurate @@ -1769,7 +1778,7 @@ public void loadTable(Path loadPath, String tableName, boolean replace, } try { - alterTable(tableName, tbl); + alterTable(tableName, tbl, environmentContext); } catch (InvalidOperationException e) { throw new HiveException(e); } @@ -1815,7 +1824,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws out.add(new Partition(tbl, outPart)); } } else { - getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), in); + getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), in, null); List part_names = new ArrayList(); for (org.apache.hadoop.hive.metastore.api.Partition p: in){ part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues())); @@ -2001,7 +2010,7 @@ private void alterPartitionSpec(Table tbl, if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) { fullName = tbl.getDbName() + "." 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 581a919..7d2efb9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -31,6 +31,7 @@
 import java.util.regex.Pattern;

 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
@@ -277,7 +278,6 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc
     return super.getSchema(dbName, tableName);
   }

-  @Override
   public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl,
       boolean cascade) throws InvalidOperationException, MetaException, TException {
     org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name);
@@ -286,10 +286,14 @@ public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.m
       alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null);
       return;
     }
-    super.alter_table(dbname, tbl_name, new_tbl, cascade);
+    EnvironmentContext environmentContext = null;
+    if(cascade){
+      environmentContext = new EnvironmentContext();
+      environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+    }
+    super.alter_table_with_environmentContext(dbname, tbl_name, new_tbl, environmentContext);
   }

-  @Override
   public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl,
       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
     // First try temp table
@@ -300,7 +304,7 @@
     }

     // Try underlying client
-    super.alter_table(dbname, tbl_name, new_tbl, envContext);
+    super.alter_table_with_environmentContext(dbname, tbl_name, new_tbl, envContext);
   }

   @Override
@@ -425,7 +429,7 @@ private void alterTempTable(String dbname, String tbl_name,
     org.apache.hadoop.hive.metastore.api.Table newtCopy = deepCopyAndLowerCaseTable(newt);
     MetaStoreUtils.updateTableStatsFast(newtCopy,
-        getWh().getFileStatusesForSD(newtCopy.getSd()), false, true);
+        getWh().getFileStatusesForSD(newtCopy.getSd()), false, true, envContext);
     Table newTable = new Table(newtCopy);
     String newDbName = newTable.getDbName();
     String newTableName = newTable.getTableName();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index a14955a..0c087ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -28,12 +28,14 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -98,6 +100,7 @@
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
@@ -286,9 +289,11 @@ public void analyzeInternal(ASTNode input) throws SemanticException {
     } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) {
       analyzeAlterTablePartColType(qualified, ast);
     } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) {
-      analyzeAlterTableProps(qualified, ast, false, false);
+      analyzeAlterTableProps(qualified, null, ast, false, false);
     } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) {
-      analyzeAlterTableProps(qualified, ast, false, true);
+      analyzeAlterTableProps(qualified, null, ast, false, true);
+    } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UPDATESTATS) {
+      analyzeAlterTableProps(qualified, partSpec, ast, false, false);
     } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) {
       analyzeAltertableSkewedby(qualified, ast);
     } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) {
@@ -397,9 +402,9 @@ public void analyzeInternal(ASTNode input) throws SemanticException {
       String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
       ast = (ASTNode) ast.getChild(1);
       if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) {
-        analyzeAlterTableProps(qualified, ast, true, false);
+        analyzeAlterTableProps(qualified, null, ast, true, false);
       } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) {
-        analyzeAlterTableProps(qualified, ast, true, true);
+        analyzeAlterTableProps(qualified, null, ast, true, true);
       } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) {
         analyzeAlterTableAddParts(qualified, ast, true);
       } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) {
@@ -1338,25 +1343,56 @@ private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expec
     }
   }

-  private void analyzeAlterTableProps(String[] qualified, ASTNode ast, boolean expectView, boolean isUnset)
-      throws SemanticException {
+  private void analyzeAlterTableProps(String[] qualified, HashMap<String, String> partSpec,
+      ASTNode ast, boolean expectView, boolean isUnset) throws SemanticException {

     String tableName = getDotName(qualified);
     HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(0)).getChild(0));
+    EnvironmentContext environmentContext = null;
+    if (SessionState.get().getCommandType()
+        .equals(HiveOperation.ALTERTABLE_UPDATETABLESTATS.getOperationName())
+        || SessionState.get().getCommandType()
+            .equals(HiveOperation.ALTERTABLE_UPDATEPARTSTATS.getOperationName())) {
+      // we need to check if the properties are valid, especially for stats.
+      boolean changeStatsSucceeded = false;
+      for (Entry<String, String> entry : mapProp.entrySet()) {
+        // we make sure that we do not change anything if there is anything
+        // wrong.
+        if (entry.getKey().equals(StatsSetupConst.ROW_COUNT)
+            || entry.getKey().equals(StatsSetupConst.RAW_DATA_SIZE)) {
+          try {
+            Long.parseLong(entry.getValue());
+            changeStatsSucceeded = true;
+          } catch (Exception e) {
+            throw new SemanticException("AlterTable " + entry.getKey() + " failed with value "
+                + entry.getValue());
+          }
+        } else {
+          throw new SemanticException("AlterTable UpdateStats " + entry.getKey()
+              + " failed because the only valid keys are " + StatsSetupConst.ROW_COUNT + " and "
+              + StatsSetupConst.RAW_DATA_SIZE);
+        }
+      }
+      if (changeStatsSucceeded) {
+        environmentContext = new EnvironmentContext();
+        environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
+      }
+    }
     AlterTableDesc alterTblDesc = null;
     if (isUnset == true) {
-      alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, expectView);
+      alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, partSpec, expectView);
       if (ast.getChild(1) != null) {
         alterTblDesc.setDropIfExists(true);
       }
     } else {
-      alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, expectView);
+      alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, partSpec, expectView);
     }
     alterTblDesc.setProps(mapProp);
+    alterTblDesc.setEnvironmentContext(environmentContext);
     alterTblDesc.setOldName(tableName);

-    addInputsOutputsAlterTable(tableName, null, alterTblDesc);
+    addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);

     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         alterTblDesc), conf));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 5f14c6b..9cca100 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -158,6 +158,7 @@ TOK_ALTERTABLE_UNARCHIVE;
 TOK_ALTERTABLE_SERDEPROPERTIES;
 TOK_ALTERTABLE_SERIALIZER;
 TOK_ALTERTABLE_UPDATECOLSTATS;
+TOK_ALTERTABLE_UPDATESTATS;
 TOK_TABLE_PARTITION;
 TOK_ALTERTABLE_FILEFORMAT;
 TOK_ALTERTABLE_LOCATION;
@@ -1035,6 +1036,7 @@ alterTblPartitionStatementSuffix
   | alterStatementSuffixClusterbySortby
   | alterStatementSuffixCompact
   | alterStatementSuffixUpdateStatsCol
+  | alterStatementSuffixUpdateStats
   | alterStatementSuffixRenameCol
   | alterStatementSuffixAddCol
   ;
@@ -1121,6 +1123,13 @@ alterStatementSuffixUpdateStatsCol
     ->^(TOK_ALTERTABLE_UPDATECOLSTATS $colName tableProperties $comment?)
     ;

+alterStatementSuffixUpdateStats
+@init { pushMsg("update basic statistics", state); }
+@after { popMsg(state); }
+    : KW_UPDATE KW_STATISTICS KW_SET tableProperties
+    ->^(TOK_ALTERTABLE_UPDATESTATS tableProperties)
+    ;
+
 alterStatementChangeColPosition
     : first=KW_FIRST|KW_AFTER afterCol=identifier
     ->{$first != null}? ^(TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION )
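With the grammar rule and token in place, the statement can be driven end to end. A hedged, harness-style sketch in Java (assumes an initialized HiveConf and SessionState, as the .q tests set up; the table names come from the tests below):

    // org.apache.hadoop.hive.ql.Driver exercising the new syntax added above
    Driver driver = new Driver(conf);
    driver.run("alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500')");
    driver.run("alter table calendarp partition (p=1) update statistics set('numRows'='1000020000')");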
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 98860c6..7909e70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -152,6 +152,9 @@
     tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS,
         new HiveOperation[] {HiveOperation.ALTERTABLE_UPDATETABLESTATS,
             HiveOperation.ALTERTABLE_UPDATEPARTSTATS});
+    tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_UPDATESTATS,
+        new HiveOperation[] {HiveOperation.ALTERTABLE_UPDATETABLESTATS,
+            HiveOperation.ALTERTABLE_UPDATEPARTSTATS});
   }

   public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 2dabce2..4ba51ec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.plan;

+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -114,6 +115,7 @@
   boolean isDropIfExists = false;
   boolean isTurnOffSorting = false;
   boolean isCascade = false;
+  EnvironmentContext environmentContext;

   public AlterTableDesc() {
   }
@@ -177,15 +179,16 @@ public AlterTableDesc(String name, HashMap<String, String> partSpec, List
-  public AlterTableDesc(AlterTableTypes alterType, boolean expectView) {
+  public AlterTableDesc(AlterTableTypes alterType, HashMap<String, String> partSpec, boolean expectView) {
     op = alterType;
+    this.partSpec = partSpec;
     this.expectView = expectView;
   }
@@ -735,4 +738,12 @@ public static boolean doesAlterTableTypeSupportPartialPartitionSpec(AlterTableTy
     return alterTableTypesWithPartialSpec.contains(type);
   }

+  public EnvironmentContext getEnvironmentContext() {
+    return environmentContext;
+  }
+
+  public void setEnvironmentContext(EnvironmentContext environmentContext) {
+    this.environmentContext = environmentContext;
+  }
+
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index b96de63..16d7d3f 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -588,7 +588,7 @@ public void testAutoPurgeTablesAndPartitions() throws Throwable {
     Table table = createPartitionedTable(dbName, tableName);
     table.getParameters().put("auto.purge", "true");
-    hm.alterTable(tableName, table, null);
+    hm.alterTable(tableName, table, null);

     Map<String, String> partitionSpec = new ImmutableMap.Builder<String, String>()
         .put("ds", "20141216")
diff --git a/ql/src/test/queries/clientnegative/updateBasicStats.q b/ql/src/test/queries/clientnegative/updateBasicStats.q
new file mode 100644
index 0000000..b9e642d
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/updateBasicStats.q
@@ -0,0 +1,5 @@
+set hive.mapred.mode=nonstrict;
+
+create table s as select * from src limit 10;
+
+alter table s update statistics set ('numRows'='NaN');
diff --git a/ql/src/test/queries/clientpositive/updateBasicStats.q b/ql/src/test/queries/clientpositive/updateBasicStats.q
new file mode 100644
index 0000000..daa8029
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/updateBasicStats.q
@@ -0,0 +1,54 @@
+set hive.mapred.mode=nonstrict;
+
+create table s as select * from src limit 10;
+
+explain select * from s;
+
+alter table s update statistics set('numRows'='12');
+
+explain select * from s;
+
+analyze table s compute statistics;
+
+explain select * from s;
+
+alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500');
+
+explain select * from s;
+
+CREATE TABLE calendarp (`year` int) partitioned by (p int);
+
+insert into table calendarp partition (p=1) values (2010), (2011), (2012);
+
+explain select * from calendarp where p=1;
+
+alter table calendarp partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000');
+
+explain select * from calendarp where p=1;
+
+create table src_stat_part_two(key string, value string) partitioned by (px int, py string);
+
+insert overwrite table src_stat_part_two partition (px=1, py='a')
+  select * from src limit 1;
+
+insert overwrite table src_stat_part_two partition (px=1, py='b')
+  select * from src limit 10;
+
+insert overwrite table src_stat_part_two partition (px=2, py='b')
+  select * from src limit 100;
+
+explain select * from src_stat_part_two where px=1 and py='a';
+
+explain select * from src_stat_part_two where px=1;
+
+alter table src_stat_part_two partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000');
+
+explain select * from src_stat_part_two where px=1 and py='a';
+
+explain select * from src_stat_part_two where px=1;
+
+alter table src_stat_part_two partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000');
+
+explain select * from src_stat_part_two where px=1 and py='a';
+
+explain select * from src_stat_part_two where px=1;
diff --git a/ql/src/test/results/clientnegative/updateBasicStats.q.out b/ql/src/test/results/clientnegative/updateBasicStats.q.out
new file mode 100644
index 0000000..3c4fe39
--- /dev/null
+++ b/ql/src/test/results/clientnegative/updateBasicStats.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: create table s as select * from src limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s as select * from src limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+FAILED: SemanticException AlterTable numRows failed with value NaN
diff --git a/ql/src/test/results/clientpositive/updateBasicStats.q.out b/ql/src/test/results/clientpositive/updateBasicStats.q.out
new file mode 100644
index 0000000..3f04b99
--- /dev/null
+++ b/ql/src/test/results/clientpositive/updateBasicStats.q.out
@@ -0,0 +1,377 @@
+PREHOOK: query: create table s as select * from src limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s as select * from src limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+PREHOOK: query: explain select * from s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: s
+          Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: alter table s update statistics set('numRows'='12')
+PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@s
+PREHOOK: Output: default@s
+POSTHOOK: query: alter table s update statistics set('numRows'='12')
+POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@s
+POSTHOOK: Output: default@s
+PREHOOK: query: explain select * from s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: s
+          Statistics: Num rows: 12 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 12 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: analyze table s compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s
+PREHOOK: Output: default@s
+POSTHOOK: query: analyze table s compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s
+POSTHOOK: Output: default@s
+PREHOOK: query: explain select * from s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: s
+          Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500')
+PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
+PREHOOK: Input: default@s
+PREHOOK: Output: default@s
+POSTHOOK: query: alter table s update statistics set('numRows'='1212', 'rawDataSize'='500500')
+POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS
+POSTHOOK: Input: default@s
+POSTHOOK: Output: default@s
+PREHOOK: query: explain select * from s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: s
+          Statistics: Num rows: 1212 Data size: 500500 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1212 Data size: 500500 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: CREATE TABLE calendarp (`year` int) partitioned by (p int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@calendarp
+POSTHOOK: query: CREATE TABLE calendarp (`year` int) partitioned by (p int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@calendarp
+PREHOOK: query: insert into table calendarp partition (p=1) values (2010), (2011), (2012)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@calendarp@p=1
+POSTHOOK: query: insert into table calendarp partition (p=1) values (2010), (2011), (2012)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@calendarp@p=1
+POSTHOOK: Lineage: calendarp PARTITION(p=1).year EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: explain select * from calendarp where p=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from calendarp where p=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: calendarp
+          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: year (type: int), 1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: alter table calendarp partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: Input: default@calendarp
+PREHOOK: Output: default@calendarp@p=1
+POSTHOOK: query: alter table calendarp partition (p=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: Input: default@calendarp
+POSTHOOK: Input: default@calendarp@p=1
+POSTHOOK: Output: default@calendarp@p=1
+PREHOOK: query: explain select * from calendarp where p=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from calendarp where p=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: calendarp
+          Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: year (type: int), 1 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: create table src_stat_part_two(key string, value string) partitioned by (px int, py string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_stat_part_two
+POSTHOOK: query: create table src_stat_part_two(key string, value string) partitioned by (px int, py string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_stat_part_two
+PREHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='a')
+  select * from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_stat_part_two@px=1/py=a
+POSTHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='a')
+  select * from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
+POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=a).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=a).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='b')
+  select * from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_stat_part_two@px=1/py=b
+POSTHOOK: query: insert overwrite table src_stat_part_two partition (px=1, py='b')
+  select * from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=b
+POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=b).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_two PARTITION(px=1,py=b).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table src_stat_part_two partition (px=2, py='b')
+  select * from src limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_stat_part_two@px=2/py=b
+POSTHOOK: query: insert overwrite table src_stat_part_two partition (px=2, py='b')
+  select * from src limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_stat_part_two@px=2/py=b
+POSTHOOK: Lineage: src_stat_part_two PARTITION(px=2,py=b).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat_part_two PARTITION(px=2,py=b).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_stat_part_two
+          Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), 1 (type: int), 'a' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain select * from src_stat_part_two where px=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from src_stat_part_two where px=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_stat_part_two
+          Statistics: Num rows: 11 Data size: 115 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), 1 (type: int), py (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 11 Data size: 115 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: alter table src_stat_part_two partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: Input: default@src_stat_part_two
+PREHOOK: Output: default@src_stat_part_two@px=1/py=a
+POSTHOOK: query: alter table src_stat_part_two partition (px=1, py='a') update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: Input: default@src_stat_part_two
+POSTHOOK: Input: default@src_stat_part_two@px=1/py=a
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
+PREHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_stat_part_two
+          Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), 1 (type: int), 'a' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain select * from src_stat_part_two where px=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from src_stat_part_two where px=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_stat_part_two
+          Statistics: Num rows: 1000020010 Data size: 300040104 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), 1 (type: int), py (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1000020010 Data size: 300040104 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: alter table src_stat_part_two partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+PREHOOK: Input: default@src_stat_part_two
+PREHOOK: Output: default@src_stat_part_two@px=1/py=a
+PREHOOK: Output: default@src_stat_part_two@px=1/py=b
+POSTHOOK: query: alter table src_stat_part_two partition (px=1) update statistics set('numRows'='1000020000', 'rawDataSize'='300040000')
+POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS
+POSTHOOK: Input: default@src_stat_part_two
+POSTHOOK: Input: default@src_stat_part_two@px=1/py=a
+POSTHOOK: Input: default@src_stat_part_two@px=1/py=b
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=a
+POSTHOOK: Output: default@src_stat_part_two@px=1/py=b
+PREHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from src_stat_part_two where px=1 and py='a'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_stat_part_two
+          Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), 1 (type: int), 'a' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1000020000 Data size: 300040000 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain select * from src_stat_part_two where px=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from src_stat_part_two where px=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src_stat_part_two
+          Statistics: Num rows: 2000040000 Data size: 600080000 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), 1 (type: int), py (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 2000040000 Data size: 600080000 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
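To close the loop on what these golden files exercise, a hedged programmatic sketch of the user-driven path (the metastore client handle msc is assumed; the table matches the tests above). The analyzer validates numRows/rawDataSize and stamps STATS_GENERATED=USER into the context, so the metastore treats the supplied numbers as user-provided rather than as a stale leftover of a plain alter:

    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
    Table t = msc.getTable("default", "s");
    t.getParameters().put(StatsSetupConst.ROW_COUNT, "1212");
    t.getParameters().put(StatsSetupConst.RAW_DATA_SIZE, "500500");
    msc.alter_table_with_environmentContext("default", "s", t, ctx);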