diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 43c53fc..23bdfaa 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2059,6 +2059,13 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "Channel logging level for remote Spark driver. One of {DEBUG, ERROR, INFO, TRACE, WARN}."),
     SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5",
         "Name of the SASL mechanism to use for authentication."),
+    SPARK_DYNAMIC_PARTITION_PRUNING(
+        "hive.spark.dynamic.partition.pruning", false,
+        "When dynamic pruning is enabled, joins on partition keys will be processed by writing\n" +
+        "to a temporary HDFS file, and read later for removing unnecessary partitions."),
+    SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE(
+        "hive.spark.dynamic.partition.pruning.max.data.size", 100*1024*1024L,
+        "Maximum total data size in dynamic pruning."),
     NWAYJOINREORDER("hive.reorder.nway.joins", true,
       "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"),
     HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null),
diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index 2a5f7e3..f9ab3fe 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -891,6 +891,8 @@ spark.query.files=add_part_multiple.q, \
   smb_mapjoin_8.q, \
   smb_mapjoin_9.q, \
   sort.q, \
+  spark_dynamic_partition_pruning.q, \
+  spark_dynamic_partition_pruning_2.q, \
   stats0.q, \
   stats1.q, \
   stats10.q, \
diff --git metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index a0b34cb..00400be 100644
--- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -50,8 +50,10 @@ uint32_t ThriftHiveMetastore_getMetaConf_args::read(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_getMetaConf_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_getMetaConf_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->key);
   xfer += oprot->writeFieldEnd();
@@ -63,8 +65,10 @@ uint32_t ThriftHiveMetastore_getMetaConf_args::write(::apache::thrift::protocol:

 uint32_t ThriftHiveMetastore_getMetaConf_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_getMetaConf_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->key)));
   xfer += oprot->writeFieldEnd();
@@ -240,12 +244,15 @@ uint32_t ThriftHiveMetastore_setMetaConf_args::read(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_setMetaConf_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_setMetaConf_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->key);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->value);
   xfer += oprot->writeFieldEnd();
@@ -257,12 +264,15 @@ uint32_t ThriftHiveMetastore_setMetaConf_args::write(::apache::thrift::protocol:

 uint32_t ThriftHiveMetastore_setMetaConf_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_setMetaConf_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->key)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->value)));
   xfer += oprot->writeFieldEnd();
@@ -410,8 +420,10 @@ uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protoc

 uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->database.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -423,8 +435,10 @@ uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::proto

 uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->database)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -612,8 +626,10 @@ uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol:

 uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
@@ -625,8 +641,10 @@ uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol

 uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();
@@ -830,16 +848,20 @@ uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol

 uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2);
   xfer += oprot->writeBool(this->deleteData);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool(this->cascade);
   xfer += oprot->writeFieldEnd();
@@ -851,16 +873,20 @@ uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protoco

 uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2);
   xfer += oprot->writeBool((*(this->deleteData)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool((*(this->cascade)));
   xfer += oprot->writeFieldEnd();
@@ -1048,8 +1074,10 @@ uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol

 uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->pattern);
   xfer += oprot->writeFieldEnd();
@@ -1061,8 +1089,10 @@ uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protoco

 uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->pattern)));
   xfer += oprot->writeFieldEnd();
@@ -1249,6 +1279,7 @@ uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::prot

 uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args");

   xfer += oprot->writeFieldStop();
@@ -1258,6 +1289,7 @@ uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::pro
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs");

   xfer += oprot->writeFieldStop();
@@ -1463,12 +1495,15 @@ uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protoco

 uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->dbname);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2);
   xfer += this->db.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -1480,12 +1515,15 @@ uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protoc

 uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->dbname)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2);
   xfer += (*(this->db)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -1653,8 +1691,10 @@ uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TPr

 uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
@@ -1666,8 +1706,10 @@ uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TP

 uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();
@@ -1855,8 +1897,10 @@ uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->type.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -1868,8 +1912,10 @@ uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol:

 uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->type)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -2077,8 +2123,10 @@ uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TP

 uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->type);
   xfer += oprot->writeFieldEnd();
@@ -2090,8 +2138,10 @@ uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::T

 uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->type)));
   xfer += oprot->writeFieldEnd();
@@ -2279,8 +2329,10 @@ uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol:

 uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
@@ -2292,8 +2344,10 @@ uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol

 uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();
@@ -2508,12 +2562,15 @@ uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::T

 uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->table_name);
   xfer += oprot->writeFieldEnd();
@@ -2525,12 +2582,15 @@ uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->table_name)));
   xfer += oprot->writeFieldEnd();
@@ -2786,16 +2846,20 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::ap

 uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->table_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3);
   xfer += this->environment_context.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -2807,16 +2871,20 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::a

 uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->table_name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3);
   xfer += (*(this->environment_context)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3064,12 +3132,15 @@ uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::T

 uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->table_name);
   xfer += oprot->writeFieldEnd();
@@ -3081,12 +3152,15 @@ uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->table_name)));
   xfer += oprot->writeFieldEnd();
@@ -3342,16 +3416,20 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::ap

 uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->table_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3);
   xfer += this->environment_context.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3363,16 +3441,20 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::a

 uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->table_name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3);
   xfer += (*(this->environment_context)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3612,8 +3694,10 @@ uint32_t ThriftHiveMetastore_create_table_args::read(::apache::thrift::protocol:

 uint32_t ThriftHiveMetastore_create_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->tbl.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3625,8 +3709,10 @@ uint32_t ThriftHiveMetastore_create_table_args::write(::apache::thrift::protocol

 uint32_t ThriftHiveMetastore_create_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->tbl)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3842,12 +3928,15 @@ uint32_t ThriftHiveMetastore_create_table_with_environment_context_args::read(::

 uint32_t ThriftHiveMetastore_create_table_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_with_environment_context_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->tbl.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 2);
   xfer += this->environment_context.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3859,12 +3948,15 @@ uint32_t ThriftHiveMetastore_create_table_with_environment_context_args::write(:

 uint32_t ThriftHiveMetastore_create_table_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_with_environment_context_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->tbl)).write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 2);
   xfer += (*(this->environment_context)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -4088,16 +4180,20 @@ uint32_t ThriftHiveMetastore_drop_table_args::read(::apache::thrift::protocol::T

 uint32_t ThriftHiveMetastore_drop_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->dbname);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool(this->deleteData);
   xfer += oprot->writeFieldEnd();
@@ -4109,16 +4205,20 @@ uint32_t ThriftHiveMetastore_drop_table_args::write(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_drop_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->dbname)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool((*(this->deleteData)));
   xfer += oprot->writeFieldEnd();
@@ -4310,20 +4410,25 @@ uint32_t ThriftHiveMetastore_drop_table_with_environment_context_args::read(::ap

 uint32_t ThriftHiveMetastore_drop_table_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_with_environment_context_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->dbname);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool(this->deleteData);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += this->environment_context.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -4335,20 +4440,25 @@ uint32_t ThriftHiveMetastore_drop_table_with_environment_context_args::write(::a

 uint32_t ThriftHiveMetastore_drop_table_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_table_with_environment_context_pargs");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->dbname)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->name)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool((*(this->deleteData)));
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += (*(this->environment_context)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -4524,12 +4634,15 @@ uint32_t ThriftHiveMetastore_get_tables_args::read(::apache::thrift::protocol::T

 uint32_t ThriftHiveMetastore_get_tables_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_args");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->pattern);
   xfer += oprot->writeFieldEnd();
@@ -4541,12 +4654,15 @@ uint32_t ThriftHiveMetastore_get_tables_args::write(::apache::thrift::protocol::

 uint32_t ThriftHiveMetastore_get_tables_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_tables_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->pattern))); xfer += oprot->writeFieldEnd(); @@ -4746,8 +4862,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_args::read(::apache::thrift::protoco uint32_t ThriftHiveMetastore_get_all_tables_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_tables_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); @@ -4759,8 +4877,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_args::write(::apache::thrift::protoc uint32_t ThriftHiveMetastore_get_all_tables_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_tables_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); @@ -4968,12 +5088,15 @@ uint32_t ThriftHiveMetastore_get_table_args::read(::apache::thrift::protocol::TP uint32_t ThriftHiveMetastore_get_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -4985,12 +5108,15 @@ uint32_t ThriftHiveMetastore_get_table_args::write(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_get_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); @@ -5198,12 +5324,15 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_objects_by_name_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); @@ -5223,12 +5352,15 @@ uint32_t 
ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_objects_by_name_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); @@ -5492,16 +5624,20 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_args::read(::apache::thri uint32_t ThriftHiveMetastore_get_table_names_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_names_by_filter_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->filter); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_tables", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->max_tables); xfer += oprot->writeFieldEnd(); @@ -5513,16 +5649,20 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_args::write(::apache::thr uint32_t ThriftHiveMetastore_get_table_names_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_names_by_filter_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->filter))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_tables", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16((*(this->max_tables))); xfer += oprot->writeFieldEnd(); @@ -5778,16 +5918,20 @@ uint32_t ThriftHiveMetastore_alter_table_args::read(::apache::thrift::protocol:: uint32_t ThriftHiveMetastore_alter_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->new_tbl.write(oprot); xfer += oprot->writeFieldEnd(); @@ -5799,16 +5943,20 @@ uint32_t ThriftHiveMetastore_alter_table_args::write(::apache::thrift::protocol: uint32_t ThriftHiveMetastore_alter_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_alter_table_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->new_tbl)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -6000,20 +6148,25 @@ uint32_t ThriftHiveMetastore_alter_table_with_environment_context_args::read(::a uint32_t ThriftHiveMetastore_alter_table_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->new_tbl.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -6025,20 +6178,25 @@ uint32_t ThriftHiveMetastore_alter_table_with_environment_context_args::write(:: uint32_t ThriftHiveMetastore_alter_table_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->new_tbl)).write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -6230,20 +6388,25 @@ uint32_t ThriftHiveMetastore_alter_table_with_cascade_args::read(::apache::thrif uint32_t ThriftHiveMetastore_alter_table_with_cascade_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_cascade_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->new_tbl.write(oprot); xfer += 
oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->cascade); xfer += oprot->writeFieldEnd(); @@ -6255,20 +6418,25 @@ uint32_t ThriftHiveMetastore_alter_table_with_cascade_args::write(::apache::thri uint32_t ThriftHiveMetastore_alter_table_with_cascade_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_table_with_cascade_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_tbl", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->new_tbl)).write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool((*(this->cascade))); xfer += oprot->writeFieldEnd(); @@ -6436,8 +6604,10 @@ uint32_t ThriftHiveMetastore_add_partition_args::read(::apache::thrift::protocol uint32_t ThriftHiveMetastore_add_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->new_part.write(oprot); xfer += oprot->writeFieldEnd(); @@ -6449,8 +6619,10 @@ uint32_t ThriftHiveMetastore_add_partition_args::write(::apache::thrift::protoco uint32_t ThriftHiveMetastore_add_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partition_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->new_part)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -6666,12 +6838,15 @@ uint32_t ThriftHiveMetastore_add_partition_with_environment_context_args::read(: uint32_t ThriftHiveMetastore_add_partition_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partition_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->new_part.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -6683,12 +6858,15 @@ uint32_t ThriftHiveMetastore_add_partition_with_environment_context_args::write( uint32_t ThriftHiveMetastore_add_partition_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partition_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->new_part)).write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += 
oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 2); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -6908,8 +7086,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partitions_args"); + ++fcnt; xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); @@ -6929,8 +7109,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partitions_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); @@ -7158,8 +7340,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partitions_pspec_args"); + ++fcnt; xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); @@ -7179,8 +7363,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partitions_pspec_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); @@ -7424,16 +7610,20 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); @@ -7453,16 +7643,20 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_pargs"); + 
++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -7678,8 +7872,10 @@ uint32_t ThriftHiveMetastore_add_partitions_req_args::read(::apache::thrift::pro uint32_t ThriftHiveMetastore_add_partitions_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partitions_req_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -7691,8 +7887,10 @@ uint32_t ThriftHiveMetastore_add_partitions_req_args::write(::apache::thrift::pr uint32_t ThriftHiveMetastore_add_partitions_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_partitions_req_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -7936,16 +8134,20 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); @@ -7958,6 +8160,7 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -7969,16 +8172,20 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + 
++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -7991,6 +8198,7 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -8214,16 +8422,20 @@ uint32_t ThriftHiveMetastore_append_partition_by_name_args::read(::apache::thrif uint32_t ThriftHiveMetastore_append_partition_by_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_by_name_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); @@ -8235,16 +8447,20 @@ uint32_t ThriftHiveMetastore_append_partition_by_name_args::write(::apache::thri uint32_t ThriftHiveMetastore_append_partition_by_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_by_name_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); @@ -8476,20 +8692,25 @@ uint32_t ThriftHiveMetastore_append_partition_by_name_with_environment_context_a uint32_t ThriftHiveMetastore_append_partition_by_name_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_by_name_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -8501,20 +8722,25 @@ uint32_t ThriftHiveMetastore_append_partition_by_name_with_environment_context_a uint32_t 
ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -8758,16 +8984,20 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); @@ -8780,6 +9010,7 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->deleteData); xfer += oprot->writeFieldEnd(); @@ -8791,16 +9022,20 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -8813,6 +9048,7 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool((*(this->deleteData))); xfer += oprot->writeFieldEnd(); @@ -9044,16 +9280,20 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( uint32_t 
ThriftHiveMetastore_drop_partition_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); @@ -9066,10 +9306,12 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->deleteData); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 5); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -9081,16 +9323,20 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -9103,10 +9349,12 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool((*(this->deleteData))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 5); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -9318,20 +9566,25 @@ uint32_t ThriftHiveMetastore_drop_partition_by_name_args::read(::apache::thrift: uint32_t ThriftHiveMetastore_drop_partition_by_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_by_name_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += 
oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->deleteData); xfer += oprot->writeFieldEnd(); @@ -9343,20 +9596,25 @@ uint32_t ThriftHiveMetastore_drop_partition_by_name_args::write(::apache::thrift uint32_t ThriftHiveMetastore_drop_partition_by_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_by_name_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool((*(this->deleteData))); xfer += oprot->writeFieldEnd(); @@ -9576,24 +9834,30 @@ uint32_t ThriftHiveMetastore_drop_partition_by_name_with_environment_context_arg uint32_t ThriftHiveMetastore_drop_partition_by_name_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_by_name_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->deleteData); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 5); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -9605,24 +9869,30 @@ uint32_t ThriftHiveMetastore_drop_partition_by_name_with_environment_context_arg uint32_t ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", 
::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool((*(this->deleteData))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 5); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -9810,8 +10080,10 @@ uint32_t ThriftHiveMetastore_drop_partitions_req_args::read(::apache::thrift::pr uint32_t ThriftHiveMetastore_drop_partitions_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partitions_req_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -9823,8 +10095,10 @@ uint32_t ThriftHiveMetastore_drop_partitions_req_args::write(::apache::thrift::p uint32_t ThriftHiveMetastore_drop_partitions_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_partitions_req_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -10040,16 +10314,20 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -10069,16 +10347,20 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoco uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size())); @@ -10321,8 +10603,10 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += 
oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size())); @@ -10336,18 +10620,22 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("source_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->source_db); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("source_table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->source_table_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dest_db", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->dest_db); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dest_table_name", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->dest_table_name); xfer += oprot->writeFieldEnd(); @@ -10359,8 +10647,10 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_exchange_partition_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size())); @@ -10374,18 +10664,22 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("source_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->source_db))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("source_table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->source_table_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dest_db", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString((*(this->dest_db))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dest_table_name", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString((*(this->dest_table_name))); xfer += oprot->writeFieldEnd(); @@ -10669,16 +10963,20 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_with_auth_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -10691,10 +10989,12 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += 
oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->user_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); @@ -10714,16 +11014,20 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_with_auth_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -10736,10 +11040,12 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString((*(this->user_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); @@ -10951,16 +11257,20 @@ uint32_t ThriftHiveMetastore_get_partition_by_name_args::read(::apache::thrift:: uint32_t ThriftHiveMetastore_get_partition_by_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_by_name_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); @@ -10972,16 +11282,20 @@ uint32_t ThriftHiveMetastore_get_partition_by_name_args::write(::apache::thrift: uint32_t ThriftHiveMetastore_get_partition_by_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_by_name_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); @@ -11185,16 
+11499,20 @@ uint32_t ThriftHiveMetastore_get_partitions_args::read(::apache::thrift::protoco uint32_t ThriftHiveMetastore_get_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -11206,16 +11524,20 @@ uint32_t ThriftHiveMetastore_get_partitions_args::write(::apache::thrift::protoc uint32_t ThriftHiveMetastore_get_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); @@ -11479,24 +11801,30 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_with_auth_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->user_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); @@ -11516,24 +11844,30 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_with_auth_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += 
oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString((*(this->user_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size())); @@ -11777,16 +12111,20 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_args::read(::apache::thrift::p uint32_t ThriftHiveMetastore_get_partitions_pspec_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_pspec_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -11798,16 +12136,20 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_args::write(::apache::thrift:: uint32_t ThriftHiveMetastore_get_partitions_pspec_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_pspec_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32((*(this->max_parts))); xfer += oprot->writeFieldEnd(); @@ -12043,16 +12385,20 @@ uint32_t ThriftHiveMetastore_get_partition_names_args::read(::apache::thrift::pr uint32_t ThriftHiveMetastore_get_partition_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -12064,16 +12410,20 @@ uint32_t ThriftHiveMetastore_get_partition_names_args::write(::apache::thrift::p uint32_t ThriftHiveMetastore_get_partition_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_pargs"); + 
++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); @@ -12309,16 +12659,20 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); @@ -12331,6 +12685,7 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -12342,16 +12697,20 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -12364,6 +12723,7 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); @@ -12647,16 +13007,20 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += 
oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -12669,14 +13033,17 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->user_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); @@ -12696,16 +13063,20 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_ps_with_auth_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size())); @@ -12718,14 +13089,17 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString((*(this->user_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size())); @@ -12989,16 +13363,20 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -13011,6 +13389,7 @@ uint32_t 
ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -13022,16 +13401,20 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size())); @@ -13044,6 +13427,7 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); @@ -13287,20 +13671,25 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->filter); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -13312,20 +13701,25 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->filter))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); xfer += oprot->writeI16((*(this->max_parts))); xfer += 
oprot->writeFieldEnd(); @@ -13569,20 +13963,25 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->filter); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32(this->max_parts); xfer += oprot->writeFieldEnd(); @@ -13594,20 +13993,25 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->filter))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((*(this->max_parts))); xfer += oprot->writeFieldEnd(); @@ -13827,8 +14231,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -13840,8 +14246,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -14057,16 +14465,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 
1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size())); @@ -14086,16 +14498,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_names_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->names)).size())); @@ -14339,16 +14755,20 @@ uint32_t ThriftHiveMetastore_alter_partition_args::read(::apache::thrift::protoc uint32_t ThriftHiveMetastore_alter_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->new_part.write(oprot); xfer += oprot->writeFieldEnd(); @@ -14360,16 +14780,20 @@ uint32_t ThriftHiveMetastore_alter_partition_args::write(::apache::thrift::proto uint32_t ThriftHiveMetastore_alter_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->new_part)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -14565,16 +14989,20 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + 
++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); @@ -14594,16 +15022,20 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partitions_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); @@ -14803,20 +15235,25 @@ uint32_t ThriftHiveMetastore_alter_partition_with_environment_context_args::read uint32_t ThriftHiveMetastore_alter_partition_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_with_environment_context_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->new_part.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->environment_context.write(oprot); xfer += oprot->writeFieldEnd(); @@ -14828,20 +15265,25 @@ uint32_t ThriftHiveMetastore_alter_partition_with_environment_context_args::writ uint32_t ThriftHiveMetastore_alter_partition_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_partition_with_environment_context_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->new_part)).write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->environment_context)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -15045,16 +15487,20 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto uint32_t 
ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_rename_partition_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -15067,6 +15513,7 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->new_part.write(oprot); xfer += oprot->writeFieldEnd(); @@ -15078,16 +15525,20 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_rename_partition_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size())); @@ -15100,6 +15551,7 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_part", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->new_part)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -15287,8 +15739,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_has_valid_characters_args"); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -15301,6 +15755,7 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("throw_exception", ::apache::thrift::protocol::T_BOOL, 2); xfer += oprot->writeBool(this->throw_exception); xfer += oprot->writeFieldEnd(); @@ -15312,8 +15767,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(::a uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_has_valid_characters_pargs"); + ++fcnt; 
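// Pattern note: every write() in these generated hunks follows the same
// Thrift serialization shape -- writeStructBegin, then one writeFieldBegin /
// value write / writeFieldEnd triple per field in field-id order, with the
// uint32_t return value accumulating the bytes written. The fcnt counter this
// patch introduces is incremented once per serialized field but is never read
// in the hunks shown here. A minimal hand-written sketch of the same shape,
// assuming a hypothetical struct Example_args with a single string field:
//
//   uint32_t Example_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
//     uint32_t xfer = 0;
//     uint32_t fcnt = 0;  // counts fields emitted; unused, mirroring this patch
//     xfer += oprot->writeStructBegin("Example_args");
//     ++fcnt;
//     xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
//     xfer += oprot->writeString(this->name);
//     xfer += oprot->writeFieldEnd();
//     xfer += oprot->writeFieldStop();
//     xfer += oprot->writeStructEnd();
//     return xfer;
//   }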
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -15326,6 +15783,7 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("throw_exception", ::apache::thrift::protocol::T_BOOL, 2); xfer += oprot->writeBool((*(this->throw_exception))); xfer += oprot->writeFieldEnd(); @@ -15501,12 +15959,15 @@ uint32_t ThriftHiveMetastore_get_config_value_args::read(::apache::thrift::proto uint32_t ThriftHiveMetastore_get_config_value_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_config_value_args"); + ++fcnt; xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("defaultValue", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->defaultValue); xfer += oprot->writeFieldEnd(); @@ -15518,12 +15979,15 @@ uint32_t ThriftHiveMetastore_get_config_value_args::write(::apache::thrift::prot uint32_t ThriftHiveMetastore_get_config_value_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_config_value_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("defaultValue", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->defaultValue))); xfer += oprot->writeFieldEnd(); @@ -15691,8 +16155,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_args::read(::apache::thrift: uint32_t ThriftHiveMetastore_partition_name_to_vals_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_to_vals_args"); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); @@ -15704,8 +16170,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_args::write(::apache::thrift uint32_t ThriftHiveMetastore_partition_name_to_vals_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_to_vals_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); @@ -15905,8 +16373,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_args::read(::apache::thrift: uint32_t ThriftHiveMetastore_partition_name_to_spec_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_to_spec_args"); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); @@ -15918,8 +16388,10 @@ uint32_t 
ThriftHiveMetastore_partition_name_to_spec_args::write(::apache::thrift uint32_t ThriftHiveMetastore_partition_name_to_spec_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_to_spec_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); @@ -16167,16 +16639,20 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_markPartitionForEvent_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size())); @@ -16190,6 +16666,7 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("eventType", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((int32_t)this->eventType); xfer += oprot->writeFieldEnd(); @@ -16201,16 +16678,20 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_markPartitionForEvent_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size())); @@ -16224,6 +16705,7 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("eventType", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((int32_t)(*(this->eventType))); xfer += oprot->writeFieldEnd(); @@ -16512,16 +16994,20 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_isPartitionMarkedForEvent_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += 
oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); @@ -16535,6 +17021,7 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("eventType", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((int32_t)this->eventType); xfer += oprot->writeFieldEnd(); @@ -16546,16 +17033,20 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_isPartitionMarkedForEvent_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); @@ -16569,6 +17060,7 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("eventType", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((int32_t)(*(this->eventType))); xfer += oprot->writeFieldEnd(); @@ -16844,12 +17336,15 @@ uint32_t ThriftHiveMetastore_add_index_args::read(::apache::thrift::protocol::TP uint32_t ThriftHiveMetastore_add_index_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_index_args"); + ++fcnt; xfer += oprot->writeFieldBegin("new_index", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->new_index.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("index_table", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->index_table.write(oprot); xfer += oprot->writeFieldEnd(); @@ -16861,12 +17356,15 @@ uint32_t ThriftHiveMetastore_add_index_args::write(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_add_index_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_index_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("new_index", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->new_index)).write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("index_table", ::apache::thrift::protocol::T_STRUCT, 2); xfer += (*(this->index_table)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -17098,20 +17596,25 @@ uint32_t ThriftHiveMetastore_alter_index_args::read(::apache::thrift::protocol:: uint32_t ThriftHiveMetastore_alter_index_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt 
= 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_index_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("base_tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->base_tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("idx_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->idx_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_idx", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->new_idx.write(oprot); xfer += oprot->writeFieldEnd(); @@ -17123,20 +17626,25 @@ uint32_t ThriftHiveMetastore_alter_index_args::write(::apache::thrift::protocol: uint32_t ThriftHiveMetastore_alter_index_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_index_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("base_tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->base_tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("idx_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->idx_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("new_idx", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->new_idx)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -17328,20 +17836,25 @@ uint32_t ThriftHiveMetastore_drop_index_by_name_args::read(::apache::thrift::pro uint32_t ThriftHiveMetastore_drop_index_by_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_index_by_name_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("index_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->index_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->deleteData); xfer += oprot->writeFieldEnd(); @@ -17353,20 +17866,25 @@ uint32_t ThriftHiveMetastore_drop_index_by_name_args::write(::apache::thrift::pr uint32_t ThriftHiveMetastore_drop_index_by_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_index_by_name_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += 
oprot->writeFieldBegin("index_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->index_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool((*(this->deleteData))); xfer += oprot->writeFieldEnd(); @@ -17570,16 +18088,20 @@ uint32_t ThriftHiveMetastore_get_index_by_name_args::read(::apache::thrift::prot uint32_t ThriftHiveMetastore_get_index_by_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_index_by_name_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("index_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->index_name); xfer += oprot->writeFieldEnd(); @@ -17591,16 +18113,20 @@ uint32_t ThriftHiveMetastore_get_index_by_name_args::write(::apache::thrift::pro uint32_t ThriftHiveMetastore_get_index_by_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_index_by_name_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("index_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->index_name))); xfer += oprot->writeFieldEnd(); @@ -17804,16 +18330,20 @@ uint32_t ThriftHiveMetastore_get_indexes_args::read(::apache::thrift::protocol:: uint32_t ThriftHiveMetastore_get_indexes_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_indexes_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_indexes", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->max_indexes); xfer += oprot->writeFieldEnd(); @@ -17825,16 +18355,20 @@ uint32_t ThriftHiveMetastore_get_indexes_args::write(::apache::thrift::protocol: uint32_t ThriftHiveMetastore_get_indexes_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_indexes_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer 
+= oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_indexes", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16((*(this->max_indexes))); xfer += oprot->writeFieldEnd(); @@ -18070,16 +18604,20 @@ uint32_t ThriftHiveMetastore_get_index_names_args::read(::apache::thrift::protoc uint32_t ThriftHiveMetastore_get_index_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_index_names_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_indexes", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->max_indexes); xfer += oprot->writeFieldEnd(); @@ -18091,16 +18629,20 @@ uint32_t ThriftHiveMetastore_get_index_names_args::write(::apache::thrift::proto uint32_t ThriftHiveMetastore_get_index_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_index_names_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max_indexes", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16((*(this->max_indexes))); xfer += oprot->writeFieldEnd(); @@ -18300,8 +18842,10 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache: uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_args"); + ++fcnt; xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->stats_obj.write(oprot); xfer += oprot->writeFieldEnd(); @@ -18313,8 +18857,10 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->stats_obj)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -18542,8 +19088,10 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apa uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_args"); + ++fcnt; xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->stats_obj.write(oprot); xfer += oprot->writeFieldEnd(); @@ -18555,8 +19103,10 @@ uint32_t 
ThriftHiveMetastore_update_partition_column_statistics_args::write(::ap uint32_t ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->stats_obj)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -18800,16 +19350,20 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::th uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->col_name); xfer += oprot->writeFieldEnd(); @@ -18821,16 +19375,20 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::t uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->col_name))); xfer += oprot->writeFieldEnd(); @@ -19082,20 +19640,25 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::read(::apache uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->col_name); xfer += oprot->writeFieldEnd(); @@ -19107,20 +19670,25 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::write(::apach uint32_t ThriftHiveMetastore_get_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { 
uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString((*(this->col_name))); xfer += oprot->writeFieldEnd(); @@ -19348,8 +19916,10 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_args::read(::apache::thrif uint32_t ThriftHiveMetastore_get_table_statistics_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -19361,8 +19931,10 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_args::write(::apache::thri uint32_t ThriftHiveMetastore_get_table_statistics_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -19550,8 +20122,10 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::read(::apache:: uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -19563,8 +20137,10 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::write(::apache: uint32_t ThriftHiveMetastore_get_partitions_statistics_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -19752,8 +20328,10 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::read(::apache::thrift::pro uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -19765,8 +20343,10 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::write(::apache::thrift::pr uint32_t 
ThriftHiveMetastore_get_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -19954,8 +20534,10 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::read(::apache::thrift::pro uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -19967,8 +20549,10 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::write(::apache::thrift::pr uint32_t ThriftHiveMetastore_set_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -20220,20 +20804,25 @@ uint32_t ThriftHiveMetastore_delete_partition_column_statistics_args::read(::apa uint32_t ThriftHiveMetastore_delete_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_delete_partition_column_statistics_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->part_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->col_name); xfer += oprot->writeFieldEnd(); @@ -20245,20 +20834,25 @@ uint32_t ThriftHiveMetastore_delete_partition_column_statistics_args::write(::ap uint32_t ThriftHiveMetastore_delete_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_delete_partition_column_statistics_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->part_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString((*(this->col_name))); xfer += oprot->writeFieldEnd(); @@ -20502,16 
+21096,20 @@ uint32_t ThriftHiveMetastore_delete_table_column_statistics_args::read(::apache: uint32_t ThriftHiveMetastore_delete_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_delete_table_column_statistics_args"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->col_name); xfer += oprot->writeFieldEnd(); @@ -20523,16 +21121,20 @@ uint32_t ThriftHiveMetastore_delete_table_column_statistics_args::write(::apache uint32_t ThriftHiveMetastore_delete_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_delete_table_column_statistics_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->col_name))); xfer += oprot->writeFieldEnd(); @@ -20760,8 +21362,10 @@ uint32_t ThriftHiveMetastore_create_function_args::read(::apache::thrift::protoc uint32_t ThriftHiveMetastore_create_function_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_function_args"); + ++fcnt; xfer += oprot->writeFieldBegin("func", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->func.write(oprot); xfer += oprot->writeFieldEnd(); @@ -20773,8 +21377,10 @@ uint32_t ThriftHiveMetastore_create_function_args::write(::apache::thrift::proto uint32_t ThriftHiveMetastore_create_function_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_function_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("func", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->func)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -20990,12 +21596,15 @@ uint32_t ThriftHiveMetastore_drop_function_args::read(::apache::thrift::protocol uint32_t ThriftHiveMetastore_drop_function_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_function_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("funcName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->funcName); xfer += oprot->writeFieldEnd(); @@ -21007,12 +21616,15 @@ uint32_t ThriftHiveMetastore_drop_function_args::write(::apache::thrift::protoco uint32_t 
ThriftHiveMetastore_drop_function_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_function_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbName))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("funcName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->funcName))); xfer += oprot->writeFieldEnd(); @@ -21196,16 +21808,20 @@ uint32_t ThriftHiveMetastore_alter_function_args::read(::apache::thrift::protoco uint32_t ThriftHiveMetastore_alter_function_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_function_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("funcName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->funcName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("newFunc", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->newFunc.write(oprot); xfer += oprot->writeFieldEnd(); @@ -21217,16 +21833,20 @@ uint32_t ThriftHiveMetastore_alter_function_args::write(::apache::thrift::protoc uint32_t ThriftHiveMetastore_alter_function_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_function_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbName))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("funcName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->funcName))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("newFunc", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->newFunc)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -21402,12 +22022,15 @@ uint32_t ThriftHiveMetastore_get_functions_args::read(::apache::thrift::protocol uint32_t ThriftHiveMetastore_get_functions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_functions_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->pattern); xfer += oprot->writeFieldEnd(); @@ -21419,12 +22042,15 @@ uint32_t ThriftHiveMetastore_get_functions_args::write(::apache::thrift::protoco uint32_t ThriftHiveMetastore_get_functions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_functions_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbName))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 2); xfer += 
oprot->writeString((*(this->pattern))); xfer += oprot->writeFieldEnd(); @@ -21632,12 +22258,15 @@ uint32_t ThriftHiveMetastore_get_function_args::read(::apache::thrift::protocol: uint32_t ThriftHiveMetastore_get_function_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_function_args"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("funcName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->funcName); xfer += oprot->writeFieldEnd(); @@ -21649,12 +22278,15 @@ uint32_t ThriftHiveMetastore_get_function_args::write(::apache::thrift::protocol uint32_t ThriftHiveMetastore_get_function_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_function_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->dbName))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("funcName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->funcName))); xfer += oprot->writeFieldEnd(); @@ -21842,8 +22474,10 @@ uint32_t ThriftHiveMetastore_create_role_args::read(::apache::thrift::protocol:: uint32_t ThriftHiveMetastore_create_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_args"); + ++fcnt; xfer += oprot->writeFieldBegin("role", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->role.write(oprot); xfer += oprot->writeFieldEnd(); @@ -21855,8 +22489,10 @@ uint32_t ThriftHiveMetastore_create_role_args::write(::apache::thrift::protocol: uint32_t ThriftHiveMetastore_create_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_role_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("role", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->role)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -22024,8 +22660,10 @@ uint32_t ThriftHiveMetastore_drop_role_args::read(::apache::thrift::protocol::TP uint32_t ThriftHiveMetastore_drop_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_args"); + ++fcnt; xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->role_name); xfer += oprot->writeFieldEnd(); @@ -22037,8 +22675,10 @@ uint32_t ThriftHiveMetastore_drop_role_args::write(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_drop_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_role_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->role_name))); xfer += oprot->writeFieldEnd(); @@ -22193,6 +22833,7 @@ uint32_t ThriftHiveMetastore_get_role_names_args::read(::apache::thrift::protoco uint32_t 
ThriftHiveMetastore_get_role_names_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_args"); xfer += oprot->writeFieldStop(); @@ -22202,6 +22843,7 @@ uint32_t ThriftHiveMetastore_get_role_names_args::write(::apache::thrift::protoc uint32_t ThriftHiveMetastore_get_role_names_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_names_pargs"); xfer += oprot->writeFieldStop(); @@ -22443,28 +23085,35 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_grant_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_args"); + ++fcnt; xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->role_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->principal_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32((int32_t)this->principal_type); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->grantor); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32((int32_t)this->grantorType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("grant_option", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool(this->grant_option); xfer += oprot->writeFieldEnd(); @@ -22476,28 +23125,35 @@ uint32_t ThriftHiveMetastore_grant_role_args::write(::apache::thrift::protocol:: uint32_t ThriftHiveMetastore_grant_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_role_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->role_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->principal_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32((int32_t)(*(this->principal_type))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString((*(this->grantor))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32((int32_t)(*(this->grantorType))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("grant_option", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool((*(this->grant_option))); xfer += oprot->writeFieldEnd(); @@ -22683,16 +23339,20 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: uint32_t 
ThriftHiveMetastore_revoke_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_args"); + ++fcnt; xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->role_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->principal_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32((int32_t)this->principal_type); xfer += oprot->writeFieldEnd(); @@ -22704,16 +23364,20 @@ uint32_t ThriftHiveMetastore_revoke_role_args::write(::apache::thrift::protocol: uint32_t ThriftHiveMetastore_revoke_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_role_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("role_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->role_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->principal_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32((int32_t)(*(this->principal_type))); xfer += oprot->writeFieldEnd(); @@ -22891,12 +23555,15 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_list_roles_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_args"); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->principal_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->principal_type); xfer += oprot->writeFieldEnd(); @@ -22908,12 +23575,15 @@ uint32_t ThriftHiveMetastore_list_roles_args::write(::apache::thrift::protocol:: uint32_t ThriftHiveMetastore_list_roles_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_roles_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->principal_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)(*(this->principal_type))); xfer += oprot->writeFieldEnd(); @@ -23113,8 +23783,10 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_args::read(::apache::thrift::prot uint32_t ThriftHiveMetastore_grant_revoke_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -23126,8 
+23798,10 @@ uint32_t ThriftHiveMetastore_grant_revoke_role_args::write(::apache::thrift::pro uint32_t ThriftHiveMetastore_grant_revoke_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_role_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -23295,8 +23969,10 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_args::read(::apache:: uint32_t ThriftHiveMetastore_get_principals_in_role_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -23308,8 +23984,10 @@ uint32_t ThriftHiveMetastore_get_principals_in_role_args::write(::apache::thrift uint32_t ThriftHiveMetastore_get_principals_in_role_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_principals_in_role_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -23477,8 +24155,10 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::read(::apache:: uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -23490,8 +24170,10 @@ uint32_t ThriftHiveMetastore_get_role_grants_for_principal_args::write(::apache: uint32_t ThriftHiveMetastore_get_role_grants_for_principal_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_role_grants_for_principal_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -23687,16 +24369,20 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_args"); + ++fcnt; xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->hiveObject.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->user_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); @@ -23716,16 +24402,20 @@ uint32_t
ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_privilege_set_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->hiveObject)).write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->user_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size())); @@ -23919,16 +24609,20 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc uint32_t ThriftHiveMetastore_list_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_args"); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->principal_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->principal_type); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->hiveObject.write(oprot); xfer += oprot->writeFieldEnd(); @@ -23940,16 +24634,20 @@ uint32_t ThriftHiveMetastore_list_privileges_args::write(::apache::thrift::proto uint32_t ThriftHiveMetastore_list_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_list_privileges_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->principal_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)(*(this->principal_type))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 3); xfer += (*(this->hiveObject)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -24149,8 +24847,10 @@ uint32_t ThriftHiveMetastore_grant_privileges_args::read(::apache::thrift::proto uint32_t ThriftHiveMetastore_grant_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_args"); + ++fcnt; xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); @@ -24162,8 +24862,10 @@ uint32_t ThriftHiveMetastore_grant_privileges_args::write(::apache::thrift::prot uint32_t ThriftHiveMetastore_grant_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_privileges_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("privileges",
::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->privileges)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -24331,8 +25033,10 @@ uint32_t ThriftHiveMetastore_revoke_privileges_args::read(::apache::thrift::prot uint32_t ThriftHiveMetastore_revoke_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_args"); + ++fcnt; xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); @@ -24344,8 +25048,10 @@ uint32_t ThriftHiveMetastore_revoke_privileges_args::write(::apache::thrift::pro uint32_t ThriftHiveMetastore_revoke_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_revoke_privileges_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->privileges)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -24513,8 +25219,10 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::read(::apache::thrift uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_args"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); @@ -24526,8 +25234,10 @@ uint32_t ThriftHiveMetastore_grant_revoke_privileges_args::write(::apache::thrif uint32_t ThriftHiveMetastore_grant_revoke_privileges_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_grant_revoke_privileges_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -24715,12 +25425,15 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_args"); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->user_name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size())); @@ -24740,12 +25453,15 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_ugi_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("user_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->user_name))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING,
static_cast<uint32_t>((*(this->group_names)).size())); @@ -24961,12 +25677,15 @@ uint32_t ThriftHiveMetastore_get_delegation_token_args::read(::apache::thrift::p uint32_t ThriftHiveMetastore_get_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_args"); + ++fcnt; xfer += oprot->writeFieldBegin("token_owner", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->token_owner); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("renewer_kerberos_principal_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->renewer_kerberos_principal_name); xfer += oprot->writeFieldEnd(); @@ -24978,12 +25697,15 @@ uint32_t ThriftHiveMetastore_get_delegation_token_args::write(::apache::thrift:: uint32_t ThriftHiveMetastore_get_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_delegation_token_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("token_owner", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->token_owner))); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("renewer_kerberos_principal_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->renewer_kerberos_principal_name))); xfer += oprot->writeFieldEnd(); @@ -25151,8 +25873,10 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_args::read(::apache::thrift: uint32_t ThriftHiveMetastore_renew_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_args"); + ++fcnt; xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->token_str_form); xfer += oprot->writeFieldEnd(); @@ -25164,8 +25888,10 @@ uint32_t ThriftHiveMetastore_renew_delegation_token_args::write(::apache::thrift uint32_t ThriftHiveMetastore_renew_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_renew_delegation_token_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->token_str_form))); xfer += oprot->writeFieldEnd(); @@ -25333,8 +26059,10 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_args::read(::apache::thrift uint32_t ThriftHiveMetastore_cancel_delegation_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_args"); + ++fcnt; xfer += oprot->writeFieldBegin("token_str_form", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->token_str_form); xfer += oprot->writeFieldEnd(); @@ -25346,8 +26074,10 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_args::write(::apache::thrif uint32_t ThriftHiveMetastore_cancel_delegation_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_cancel_delegation_token_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("token_str_form",
::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->token_str_form))); xfer += oprot->writeFieldEnd(); @@ -25482,6 +26212,7 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_args"); xfer += oprot->writeFieldStop(); @@ -25491,6 +26222,7 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protoco uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_pargs"); xfer += oprot->writeFieldStop(); @@ -25623,6 +26355,7 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::pro uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_args"); xfer += oprot->writeFieldStop(); @@ -25632,6 +26365,7 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::pr uint32_t ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_pargs"); xfer += oprot->writeFieldStop(); @@ -25777,8 +26511,10 @@ uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TP uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_args"); + ++fcnt; xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); @@ -25790,8 +26526,10 @@ uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -25939,8 +26677,10 @@ uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TP uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_args"); + ++fcnt; xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->rqst.write(oprot); xfer += oprot->writeFieldEnd(); @@ -25952,8 +26692,10 @@ uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::T uint32_t ThriftHiveMetastore_abort_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->rqst)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -26101,8 +26843,10 @@ 
uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::T
 uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26114,8 +26858,10 @@ uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::
 uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26283,8 +27029,10 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc
 uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26296,8 +27044,10 @@ uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProto
 uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26485,8 +27235,10 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T
 uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26498,8 +27250,10 @@ uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::
 uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26707,8 +27461,10 @@ uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProt
 uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26720,8 +27476,10 @@ uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TPro
 uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26889,8 +27647,10 @@ uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::T
 uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -26902,8 +27662,10 @@ uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::
 uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27051,8 +27813,10 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP
 uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->ids.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27064,8 +27828,10 @@ uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::T
 uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->ids)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27253,8 +28019,10 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr
 uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->txns.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27266,8 +28034,10 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::p
 uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->txns)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27415,8 +28185,10 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro
 uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27428,8 +28200,10 @@ uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TPr
 uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27546,8 +28320,10 @@ uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol:
 uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27559,8 +28335,10 @@ uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol
 uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27708,8 +28486,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::read(::apache::thrift:
 uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27721,8 +28501,10 @@ uint32_t ThriftHiveMetastore_add_dynamic_partitions_args::write(::apache::thrift
 uint32_t ThriftHiveMetastore_add_dynamic_partitions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_dynamic_partitions_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27890,8 +28672,10 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::read(::apache::thrift::
 uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -27903,8 +28687,10 @@ uint32_t ThriftHiveMetastore_get_next_notification_args::write(::apache::thrift:
 uint32_t ThriftHiveMetastore_get_next_notification_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_notification_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -28039,6 +28825,7 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::read(::apache
 uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_args");
   xfer += oprot->writeFieldStop();
@@ -28048,6 +28835,7 @@ uint32_t ThriftHiveMetastore_get_current_notificationEventId_args::write(::apach
 uint32_t ThriftHiveMetastore_get_current_notificationEventId_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_current_notificationEventId_pargs");
   xfer += oprot->writeFieldStop();
@@ -28193,8 +28981,10 @@ uint32_t ThriftHiveMetastore_fire_listener_event_args::read(::apache::thrift::pr
 uint32_t ThriftHiveMetastore_fire_listener_event_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_args");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -28206,8 +28996,10 @@ uint32_t ThriftHiveMetastore_fire_listener_event_args::write(::apache::thrift::p
 uint32_t ThriftHiveMetastore_fire_listener_event_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_fire_listener_event_pargs");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
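Note on the .cpp hunks above: every regenerated write() now declares a local fcnt and increments it before each writeFieldBegin(), yet nothing in the hunks shown here ever reads the counter back. The most plausible reading -- an assumption, since the consuming code sits outside this patch -- is that the regenerated serializer uses the counter to track how many fields it actually emits once optional fields guarded by __isset can be skipped. A minimal, self-contained sketch of that style of writer; MockProtocol and OptionalPair are hypothetical stand-ins, not Thrift's API:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Stand-in for a Thrift protocol object; only the calls used below exist.
    struct MockProtocol {
      uint32_t writeStructBegin(const char* name) { std::cout << name << " {\n"; return 0; }
      uint32_t writeFieldBegin(const char* name, int /*type*/, int16_t id) {
        std::cout << "  field " << id << " (" << name << ")\n"; return 0;
      }
      uint32_t writeString(const std::string& s) { std::cout << "    \"" << s << "\"\n"; return 0; }
      uint32_t writeFieldEnd() { return 0; }
      uint32_t writeFieldStop() { return 0; }
      uint32_t writeStructEnd() { std::cout << "}\n"; return 0; }
    };

    // Hypothetical struct in the shape of the generated args classes.
    struct OptionalPair {
      std::string key;    // field 1, always written
      std::string value;  // field 2, treated as optional
      struct Isset { bool key = false; bool value = false; } __isset;

      uint32_t write(MockProtocol* oprot) const {
        uint32_t xfer = 0;
        uint32_t fcnt = 0;  // counts fields actually emitted, as in the patch
        xfer += oprot->writeStructBegin("OptionalPair");
        ++fcnt;
        xfer += oprot->writeFieldBegin("key", /*T_STRING*/ 11, 1);
        xfer += oprot->writeString(key);
        xfer += oprot->writeFieldEnd();
        if (__isset.value) {  // optional field: skipped unless flagged
          ++fcnt;
          xfer += oprot->writeFieldBegin("value", 11, 2);
          xfer += oprot->writeString(value);
          xfer += oprot->writeFieldEnd();
        }
        xfer += oprot->writeFieldStop();
        xfer += oprot->writeStructEnd();
        return xfer;
      }
    };

    int main() {
      MockProtocol proto;
      OptionalPair p;
      p.key = "hive.spark.client";  // value left unset: only one field is written
      p.write(&proto);
      return 0;
    }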
diff --git metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 0f86117..d9497aa 100644
--- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -581,6 +581,7 @@ class ThriftHiveMetastore_getMetaConf_args {
   void __set_key(const std::string& val) {
     key = val;
+    __isset.key = true;
   }
   bool operator == (const ThriftHiveMetastore_getMetaConf_args & rhs) const
@@ -634,10 +635,12 @@ class ThriftHiveMetastore_getMetaConf_result {
   void __set_success(const std::string& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_getMetaConf_result & rhs) const
@@ -701,10 +704,12 @@ class ThriftHiveMetastore_setMetaConf_args {
   void __set_key(const std::string& val) {
     key = val;
+    __isset.key = true;
   }
   void __set_value(const std::string& val) {
     value = val;
+    __isset.value = true;
   }
   bool operator == (const ThriftHiveMetastore_setMetaConf_args & rhs) const
@@ -759,6 +764,7 @@ class ThriftHiveMetastore_setMetaConf_result {
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_setMetaConf_result & rhs) const
@@ -816,6 +822,7 @@ class ThriftHiveMetastore_create_database_args {
   void __set_database(const Database& val) {
     database = val;
+    __isset.database = true;
   }
   bool operator == (const ThriftHiveMetastore_create_database_args & rhs) const
@@ -871,14 +878,17 @@ class ThriftHiveMetastore_create_database_result {
   void __set_o1(const AlreadyExistsException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_create_database_result & rhs) const
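The header-side change is the mirror image of the .cpp hunks: each generated __set_X() now records __isset.X = true in addition to assigning the value. Before this change, a value installed through a setter was indistinguishable from a default-constructed one anywhere the isset flags are consulted. A compressed before/after sketch of that behavior, using a hypothetical GetThingResult rather than any real generated class (the flag-aware operator== below is my paraphrase of the usual Thrift convention, not copied from this header):

    #include <cassert>
    #include <string>

    struct GetThingResult {
      std::string success;
      struct Isset { bool success = false; } __isset;

      // Old-style setter: assigns but leaves the flag untouched.
      void set_success_old(const std::string& val) { success = val; }

      // New-style setter, matching the "+ __isset.success = true;" hunks.
      void __set_success(const std::string& val) {
        success = val;
        __isset.success = true;
      }

      // Equality ignores an optional field unless its flag is set.
      bool operator==(const GetThingResult& rhs) const {
        if (__isset.success != rhs.__isset.success) return false;
        return !__isset.success || success == rhs.success;
      }
    };

    int main() {
      GetThingResult a, b;
      a.set_success_old("row1");
      assert(a == b);      // old behavior: the assigned value is invisible
      a.__set_success("row1");
      assert(!(a == b));   // new behavior: the flag makes it count
      return 0;
    }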
@@ -944,6 +954,7 @@ class ThriftHiveMetastore_get_database_args {
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_database_args & rhs) const
@@ -999,14 +1010,17 @@ class ThriftHiveMetastore_get_database_result {
   void __set_success(const Database& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_database_result & rhs) const
@@ -1076,14 +1090,17 @@ class ThriftHiveMetastore_drop_database_args {
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   void __set_cascade(const bool val) {
     cascade = val;
+    __isset.cascade = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_database_args & rhs) const
@@ -1145,14 +1162,17 @@ class ThriftHiveMetastore_drop_database_result {
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidOperationException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_database_result & rhs) const
@@ -1218,6 +1238,7 @@ class ThriftHiveMetastore_get_databases_args {
   void __set_pattern(const std::string& val) {
     pattern = val;
+    __isset.pattern = true;
   }
   bool operator == (const ThriftHiveMetastore_get_databases_args & rhs) const
@@ -1271,10 +1292,12 @@ class ThriftHiveMetastore_get_databases_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_databases_result & rhs) const
@@ -1375,10 +1398,12 @@ class ThriftHiveMetastore_get_all_databases_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_all_databases_result & rhs) const
@@ -1442,10 +1467,12 @@ class ThriftHiveMetastore_alter_database_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_db(const Database& val) {
     db = val;
+    __isset.db = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_database_args & rhs) const
@@ -1502,10 +1529,12 @@ class ThriftHiveMetastore_alter_database_result {
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_database_result & rhs) const
@@ -1567,6 +1596,7 @@ class ThriftHiveMetastore_get_type_args {
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_type_args & rhs) const
@@ -1622,14 +1652,17 @@ class ThriftHiveMetastore_get_type_result {
   void __set_success(const Type& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_type_result & rhs) const
@@ -1695,6 +1728,7 @@ class ThriftHiveMetastore_create_type_args {
   void __set_type(const Type& val) {
     type = val;
+    __isset.type = true;
   }
   bool operator == (const ThriftHiveMetastore_create_type_args & rhs) const
@@ -1752,18 +1786,22 @@ class ThriftHiveMetastore_create_type_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const AlreadyExistsException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_create_type_result & rhs) const
@@ -1833,6 +1871,7 @@ class ThriftHiveMetastore_drop_type_args {
   void __set_type(const std::string& val) {
     type = val;
+    __isset.type = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_type_args & rhs) const
@@ -1888,14 +1927,17 @@ class ThriftHiveMetastore_drop_type_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_type_result & rhs) const
@@ -1961,6 +2003,7 @@ class ThriftHiveMetastore_get_type_all_args {
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_type_all_args & rhs) const
@@ -2014,10 +2057,12 @@ class ThriftHiveMetastore_get_type_all_result {
   void __set_success(const std::map & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_type_all_result & rhs) const
@@ -2081,10 +2126,12 @@ class ThriftHiveMetastore_get_fields_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_table_name(const std::string& val) {
     table_name = val;
+    __isset.table_name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_fields_args & rhs) const
@@ -2145,18 +2192,22 @@ class ThriftHiveMetastore_get_fields_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const UnknownTableException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_fields_result & rhs) const
@@ -2230,14 +2281,17 @@ class ThriftHiveMetastore_get_fields_with_environment_context_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_table_name(const std::string& val) {
     table_name = val;
+    __isset.table_name = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_get_fields_with_environment_context_args & rhs) const
@@ -2301,18 +2355,22 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const UnknownTableException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_fields_with_environment_context_result & rhs) const
@@ -2384,10 +2442,12 @@ class ThriftHiveMetastore_get_schema_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_table_name(const std::string& val) {
     table_name = val;
+    __isset.table_name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_schema_args & rhs) const
@@ -2448,18 +2508,22 @@ class ThriftHiveMetastore_get_schema_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const UnknownTableException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_schema_result & rhs) const
@@ -2533,14 +2597,17 @@ class ThriftHiveMetastore_get_schema_with_environment_context_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_table_name(const std::string& val) {
     table_name = val;
+    __isset.table_name = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_get_schema_with_environment_context_args & rhs) const
@@ -2604,18 +2671,22 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const UnknownTableException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_schema_with_environment_context_result & rhs) const
@@ -2685,6 +2756,7 @@ class ThriftHiveMetastore_create_table_args {
   void __set_tbl(const Table& val) {
     tbl = val;
+    __isset.tbl = true;
   }
   bool operator == (const ThriftHiveMetastore_create_table_args & rhs) const
@@ -2742,18 +2814,22 @@ class ThriftHiveMetastore_create_table_result {
   void __set_o1(const AlreadyExistsException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const NoSuchObjectException& val) {
     o4 = val;
+    __isset.o4 = true;
   }
   bool operator == (const ThriftHiveMetastore_create_table_result & rhs) const
@@ -2825,10 +2901,12 @@ class ThriftHiveMetastore_create_table_with_environment_context_args {
   void __set_tbl(const Table& val) {
     tbl = val;
+    __isset.tbl = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_create_table_with_environment_context_args & rhs) const
@@ -2889,18 +2967,22 @@ class ThriftHiveMetastore_create_table_with_environment_context_result {
   void __set_o1(const AlreadyExistsException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const NoSuchObjectException& val) {
     o4 = val;
+    __isset.o4 = true;
   }
   bool operator == (const ThriftHiveMetastore_create_table_with_environment_context_result & rhs) const
@@ -2974,14 +3056,17 @@ class ThriftHiveMetastore_drop_table_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_table_args & rhs) const
@@ -3041,10 +3126,12 @@ class ThriftHiveMetastore_drop_table_result {
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_table_result & rhs) const
@@ -3112,18 +3199,22 @@ class ThriftHiveMetastore_drop_table_with_environment_context_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_table_with_environment_context_args & rhs) const
@@ -3186,10 +3277,12 @@ class ThriftHiveMetastore_drop_table_with_environment_context_result {
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_table_with_environment_context_result & rhs) const
@@ -3253,10 +3346,12 @@ class ThriftHiveMetastore_get_tables_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_pattern(const std::string& val) {
     pattern = val;
+    __isset.pattern = true;
   }
   bool operator == (const ThriftHiveMetastore_get_tables_args & rhs) const
@@ -3313,10 +3408,12 @@ class ThriftHiveMetastore_get_tables_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_tables_result & rhs) const
@@ -3378,6 +3475,7 @@ class ThriftHiveMetastore_get_all_tables_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_all_tables_args & rhs) const
@@ -3431,10 +3529,12 @@ class ThriftHiveMetastore_get_all_tables_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_all_tables_result & rhs) const
@@ -3498,10 +3598,12 @@ class ThriftHiveMetastore_get_table_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_table_args & rhs) const
@@ -3560,14 +3662,17 @@ class ThriftHiveMetastore_get_table_result {
   void __set_success(const Table& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_table_result & rhs) const
@@ -3635,10 +3740,12 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_tbl_names(const std::vector & val) {
     tbl_names = val;
+    __isset.tbl_names = true;
   }
   bool operator == (const ThriftHiveMetastore_get_table_objects_by_name_args & rhs) const
@@ -3699,18 +3806,22 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidOperationException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_table_objects_by_name_result & rhs) const
@@ -3784,14 +3895,17 @@ class ThriftHiveMetastore_get_table_names_by_filter_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_filter(const std::string& val) {
     filter = val;
+    __isset.filter = true;
   }
   void __set_max_tables(const int16_t val) {
     max_tables = val;
+    __isset.max_tables = true;
   }
   bool operator == (const ThriftHiveMetastore_get_table_names_by_filter_args & rhs) const
@@ -3855,18 +3969,22 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidOperationException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_table_names_by_filter_result & rhs) const
@@ -3940,14 +4058,17 @@ class ThriftHiveMetastore_alter_table_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_new_tbl(const Table& val) {
     new_tbl = val;
+    __isset.new_tbl = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_table_args & rhs) const
@@ -4007,10 +4128,12 @@ class ThriftHiveMetastore_alter_table_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_table_result & rhs) const
@@ -4078,18 +4201,22 @@ class ThriftHiveMetastore_alter_table_with_environment_context_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_new_tbl(const Table& val) {
     new_tbl = val;
+    __isset.new_tbl = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_table_with_environment_context_args & rhs) const
@@ -4152,10 +4279,12 @@ class ThriftHiveMetastore_alter_table_with_environment_context_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_table_with_environment_context_result & rhs) const
@@ -4223,18 +4352,22 @@ class ThriftHiveMetastore_alter_table_with_cascade_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_new_tbl(const Table& val) {
     new_tbl = val;
+    __isset.new_tbl = true;
   }
   void __set_cascade(const bool val) {
     cascade = val;
+    __isset.cascade = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_table_with_cascade_args & rhs) const
@@ -4297,10 +4430,12 @@ class ThriftHiveMetastore_alter_table_with_cascade_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_table_with_cascade_result & rhs) const
@@ -4362,6 +4497,7 @@ class ThriftHiveMetastore_add_partition_args {
   void __set_new_part(const Partition& val) {
     new_part = val;
+    __isset.new_part = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partition_args & rhs) const
@@ -4419,18 +4555,22 @@ class ThriftHiveMetastore_add_partition_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partition_result & rhs) const
@@ -4502,10 +4642,12 @@ class ThriftHiveMetastore_add_partition_with_environment_context_args {
   void __set_new_part(const Partition& val) {
     new_part = val;
+    __isset.new_part = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partition_with_environment_context_args & rhs) const
@@ -4566,18 +4708,22 @@ class ThriftHiveMetastore_add_partition_with_environment_context_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partition_with_environment_context_result & rhs) const
@@ -4647,6 +4793,7 @@ class ThriftHiveMetastore_add_partitions_args {
   void __set_new_parts(const std::vector & val) {
     new_parts = val;
+    __isset.new_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partitions_args & rhs) const
@@ -4704,18 +4851,22 @@ class ThriftHiveMetastore_add_partitions_result {
   void __set_success(const int32_t val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partitions_result & rhs) const
@@ -4785,6 +4936,7 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
   void __set_new_parts(const std::vector & val) {
     new_parts = val;
+    __isset.new_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partitions_pspec_args & rhs) const
@@ -4842,18 +4994,22 @@ class ThriftHiveMetastore_add_partitions_pspec_result {
   void __set_success(const int32_t val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partitions_pspec_result & rhs) const
@@ -4927,14 +5083,17 @@ class ThriftHiveMetastore_append_partition_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_args & rhs) const
@@ -4998,18 +5157,22 @@ class ThriftHiveMetastore_append_partition_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_result & rhs) const
@@ -5079,6 +5242,7 @@ class ThriftHiveMetastore_add_partitions_req_args {
   void __set_request(const AddPartitionsRequest& val) {
     request = val;
+    __isset.request = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partitions_req_args & rhs) const
@@ -5136,18 +5300,22 @@ class ThriftHiveMetastore_add_partitions_req_result {
   void __set_success(const AddPartitionsResult& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_add_partitions_req_result & rhs) const
@@ -5223,18 +5391,22 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_with_environment_context_args & rhs) const
@@ -5301,18 +5473,22 @@ class ThriftHiveMetastore_append_partition_with_environment_context_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_with_environment_context_result & rhs) const
@@ -5386,14 +5562,17 @@ class ThriftHiveMetastore_append_partition_by_name_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_by_name_args & rhs) const
@@ -5457,18 +5636,22 @@ class ThriftHiveMetastore_append_partition_by_name_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_by_name_result & rhs) const
@@ -5544,18 +5727,22 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_args
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_by_name_with_environment_context_args & rhs) const
@@ -5622,18 +5809,22 @@ class ThriftHiveMetastore_append_partition_by_name_with_environment_context_resu
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_append_partition_by_name_with_environment_context_result & rhs) const
@@ -5709,18 +5900,22 @@ class ThriftHiveMetastore_drop_partition_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_args & rhs) const
@@ -5785,14 +5980,17 @@ class ThriftHiveMetastore_drop_partition_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_result & rhs) const
@@ -5866,22 +6064,27 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_with_environment_context_args & rhs) const
@@ -5949,14 +6152,17 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_with_environment_context_result & rhs) const
@@ -6028,18 +6234,22 @@ class ThriftHiveMetastore_drop_partition_by_name_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_by_name_args & rhs) const
@@ -6104,14 +6314,17 @@ class ThriftHiveMetastore_drop_partition_by_name_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_by_name_result & rhs) const
@@ -6185,22 +6398,27 @@ class ThriftHiveMetastore_drop_partition_by_name_with_environment_context_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_by_name_with_environment_context_args & rhs) const
@@ -6268,14 +6486,17 @@ class ThriftHiveMetastore_drop_partition_by_name_with_environment_context_result
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partition_by_name_with_environment_context_result & rhs) const
@@ -6341,6 +6562,7 @@ class ThriftHiveMetastore_drop_partitions_req_args {
   void __set_req(const DropPartitionsRequest& val) {
     req = val;
+    __isset.req = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partitions_req_args & rhs) const
@@ -6396,14 +6618,17 @@ class ThriftHiveMetastore_drop_partitions_req_result {
   void __set_success(const DropPartitionsResult& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_partitions_req_result & rhs) const
@@ -6473,14 +6698,17 @@ class ThriftHiveMetastore_get_partition_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_args & rhs) const
@@ -6542,14 +6770,17 @@ class ThriftHiveMetastore_get_partition_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_result & rhs) const
@@ -6623,22 +6854,27 @@ class ThriftHiveMetastore_exchange_partition_args {
   void __set_partitionSpecs(const std::map & val) {
     partitionSpecs = val;
+    __isset.partitionSpecs = true;
   }
   void __set_source_db(const std::string& val) {
     source_db = val;
+    __isset.source_db = true;
   }
   void __set_source_table_name(const std::string& val) {
     source_table_name = val;
+    __isset.source_table_name = true;
   }
   void __set_dest_db(const std::string& val) {
     dest_db = val;
+    __isset.dest_db = true;
   }
   void __set_dest_table_name(const std::string& val) {
     dest_table_name = val;
+    __isset.dest_table_name = true;
   }
   bool operator == (const ThriftHiveMetastore_exchange_partition_args & rhs) const
@@ -6710,22 +6946,27 @@ class ThriftHiveMetastore_exchange_partition_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const InvalidObjectException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const InvalidInputException& val) {
     o4 = val;
+    __isset.o4 = true;
   }
   bool operator == (const ThriftHiveMetastore_exchange_partition_result & rhs) const
@@ -6807,22 +7048,27 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_user_name(const std::string& val) {
     user_name = val;
+    __isset.user_name = true;
   }
   void __set_group_names(const std::vector & val) {
     group_names = val;
+    __isset.group_names = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_with_auth_args & rhs) const
@@ -6890,14 +7136,17 @@ class ThriftHiveMetastore_get_partition_with_auth_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_with_auth_result & rhs) const
@@ -6967,14 +7216,17 @@ class ThriftHiveMetastore_get_partition_by_name_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_by_name_args & rhs) const
@@ -7036,14 +7288,17 @@ class ThriftHiveMetastore_get_partition_by_name_result {
   void __set_success(const Partition& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_by_name_result & rhs) const
@@ -7113,14 +7368,17 @@ class ThriftHiveMetastore_get_partitions_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_args & rhs) const
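All of the _result classes in this header follow the same shape: one success member plus one o1..oN member per declared exception, and after a call exactly one of them is expected to carry a value. With the setters now maintaining the flags, whichever side constructs the result through __set_* leaves behind enough information for the receiver to tell which outcome it got. A sketch of that dispatch-side check, in the style of (but not copied from) Thrift's generated recv_ methods; FetchResult and NotFound are hypothetical:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    struct NotFound { std::string msg; };

    struct FetchResult {
      std::string success;
      NotFound o1;
      struct Isset { bool success = false; bool o1 = false; } __isset;

      void __set_success(const std::string& v) { success = v; __isset.success = true; }
      void __set_o1(const NotFound& e) { o1 = e; __isset.o1 = true; }
    };

    // Whichever flag is set decides between returning and throwing.
    std::string unpack(const FetchResult& r) {
      if (r.__isset.success) return r.success;
      if (r.__isset.o1) throw std::runtime_error(r.o1.msg);
      throw std::runtime_error("fetch failed: unknown result");
    }

    int main() {
      FetchResult ok;
      ok.__set_success("db1");
      std::cout << unpack(ok) << "\n";
      FetchResult err;
      err.__set_o1(NotFound{"no such object"});
      try { unpack(err); } catch (const std::exception& e) { std::cout << e.what() << "\n"; }
      return 0;
    }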
@@ -7182,14 +7440,17 @@ class ThriftHiveMetastore_get_partitions_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_result & rhs) const
@@ -7263,22 +7524,27 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   void __set_user_name(const std::string& val) {
     user_name = val;
+    __isset.user_name = true;
   }
   void __set_group_names(const std::vector & val) {
     group_names = val;
+    __isset.group_names = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_with_auth_args & rhs) const
@@ -7346,14 +7612,17 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_with_auth_result & rhs) const
@@ -7423,14 +7692,17 @@ class ThriftHiveMetastore_get_partitions_pspec_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_max_parts(const int32_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_pspec_args & rhs) const
@@ -7492,14 +7764,17 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_pspec_result & rhs) const
@@ -7569,14 +7844,17 @@ class ThriftHiveMetastore_get_partition_names_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_names_args & rhs) const
@@ -7636,10 +7914,12 @@ class ThriftHiveMetastore_get_partition_names_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_names_result & rhs) const
@@ -7707,18 +7987,22 @@ class ThriftHiveMetastore_get_partitions_ps_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_ps_args & rhs) const
@@ -7783,14 +8067,17 @@ class ThriftHiveMetastore_get_partitions_ps_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_ps_result & rhs) const
@@ -7866,26 +8153,32 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   void __set_user_name(const std::string& val) {
     user_name = val;
+    __isset.user_name = true;
   }
   void __set_group_names(const std::vector & val) {
     group_names = val;
+    __isset.group_names = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_ps_with_auth_args & rhs) const
@@ -7956,14 +8249,17 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_ps_with_auth_result & rhs) const
@@ -8035,18 +8331,22 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_names_ps_args & rhs) const
@@ -8111,14 +8411,17 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partition_names_ps_result & rhs) const
@@ -8190,18 +8493,22 @@ class ThriftHiveMetastore_get_partitions_by_filter_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_filter(const std::string& val) {
     filter = val;
+    __isset.filter = true;
   }
   void __set_max_parts(const int16_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_by_filter_args & rhs) const
@@ -8266,14 +8573,17 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_by_filter_result & rhs) const
@@ -8345,18 +8655,22 @@ class ThriftHiveMetastore_get_part_specs_by_filter_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_filter(const std::string& val) {
     filter = val;
+    __isset.filter = true;
   }
   void __set_max_parts(const int32_t val) {
     max_parts = val;
+    __isset.max_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_get_part_specs_by_filter_args & rhs) const
@@ -8421,14 +8735,17 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_part_specs_by_filter_result & rhs) const
@@ -8494,6 +8811,7 @@ class ThriftHiveMetastore_get_partitions_by_expr_args {
   void __set_req(const PartitionsByExprRequest& val) {
     req = val;
+    __isset.req = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_by_expr_args & rhs) const
@@ -8549,14 +8867,17 @@ class ThriftHiveMetastore_get_partitions_by_expr_result {
   void __set_success(const PartitionsByExprResult& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_by_expr_result & rhs) const
@@ -8626,14 +8947,17 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_names(const std::vector & val) {
     names = val;
+    __isset.names = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_by_names_args & rhs) const
@@ -8695,14 +9019,17 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_partitions_by_names_result & rhs) const
@@ -8772,14 +9099,17 @@ class ThriftHiveMetastore_alter_partition_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_new_part(const Partition& val) {
     new_part = val;
+    __isset.new_part = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_partition_args & rhs) const
@@ -8839,10 +9169,12 @@ class ThriftHiveMetastore_alter_partition_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_partition_result & rhs) const
@@ -8908,14 +9240,17 @@ class ThriftHiveMetastore_alter_partitions_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_new_parts(const std::vector & val) {
     new_parts = val;
+    __isset.new_parts = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_partitions_args & rhs) const
@@ -8975,10 +9310,12 @@ class ThriftHiveMetastore_alter_partitions_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_partitions_result & rhs) const
@@ -9046,18 +9383,22 @@ class ThriftHiveMetastore_alter_partition_with_environment_context_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_new_part(const Partition& val) {
     new_part = val;
+    __isset.new_part = true;
   }
   void __set_environment_context(const EnvironmentContext& val) {
     environment_context = val;
+    __isset.environment_context = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_partition_with_environment_context_args & rhs) const
@@ -9120,10 +9461,12 @@ class ThriftHiveMetastore_alter_partition_with_environment_context_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_partition_with_environment_context_result & rhs) const
@@ -9191,18 +9534,22 @@ class ThriftHiveMetastore_rename_partition_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_new_part(const Partition& val) {
     new_part = val;
+    __isset.new_part = true;
   }
   bool operator == (const ThriftHiveMetastore_rename_partition_args & rhs) const
@@ -9265,10 +9612,12 @@ class ThriftHiveMetastore_rename_partition_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_rename_partition_result & rhs) const
@@ -9332,10 +9681,12 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
   void __set_part_vals(const std::vector & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_throw_exception(const bool val) {
     throw_exception = val;
+    __isset.throw_exception = true;
   }
   bool operator == (const ThriftHiveMetastore_partition_name_has_valid_characters_args & rhs) const
@@ -9392,10 +9743,12 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_partition_name_has_valid_characters_result & rhs) const
@@ -9459,10 +9812,12 @@ class ThriftHiveMetastore_get_config_value_args {
   void __set_name(const std::string& val) {
     name = val;
+    __isset.name = true;
   }
   void __set_defaultValue(const std::string& val) {
     defaultValue = val;
+    __isset.defaultValue = true;
   }
   bool operator == (const ThriftHiveMetastore_get_config_value_args & rhs) const
@@ -9519,10 +9874,12 @@ class ThriftHiveMetastore_get_config_value_result {
   void __set_success(const std::string& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const ConfigValSecurityException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_config_value_result & rhs) const
@@ -9584,6 +9941,7 @@ class ThriftHiveMetastore_partition_name_to_vals_args {
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   bool operator == (const ThriftHiveMetastore_partition_name_to_vals_args & rhs) const
@@ -9637,10 +9995,12 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_partition_name_to_vals_result & rhs) const
@@ -9702,6 +10062,7 @@ class ThriftHiveMetastore_partition_name_to_spec_args {
   void __set_part_name(const std::string& val) {
     part_name = val;
+    __isset.part_name = true;
   }
   bool operator == (const ThriftHiveMetastore_partition_name_to_spec_args & rhs) const
@@ -9755,10 +10116,12 @@ class ThriftHiveMetastore_partition_name_to_spec_result {
   void __set_success(const std::map & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   bool operator == (const ThriftHiveMetastore_partition_name_to_spec_result & rhs) const
@@ -9826,18 +10189,22 @@ class ThriftHiveMetastore_markPartitionForEvent_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::map & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_eventType(const PartitionEventType::type val) {
     eventType = val;
+    __isset.eventType = true;
   }
   bool operator == (const ThriftHiveMetastore_markPartitionForEvent_args & rhs) const
@@ -9908,26 +10275,32 @@ class ThriftHiveMetastore_markPartitionForEvent_result {
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const UnknownTableException& val) {
     o4 = val;
+    __isset.o4 = true;
   }
   void __set_o5(const UnknownPartitionException& val) {
     o5 = val;
+    __isset.o5 = true;
   }
   void __set_o6(const InvalidPartitionException& val) {
     o6 = val;
+    __isset.o6 = true;
   }
   bool operator == (const ThriftHiveMetastore_markPartitionForEvent_result & rhs) const
@@ -10011,18 +10384,22 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_part_vals(const std::map & val) {
     part_vals = val;
+    __isset.part_vals = true;
   }
   void __set_eventType(const PartitionEventType::type val) {
     eventType = val;
+    __isset.eventType = true;
   }
   bool operator == (const ThriftHiveMetastore_isPartitionMarkedForEvent_args & rhs) const
@@ -10095,30 +10472,37 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const UnknownDBException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const UnknownTableException& val) {
     o4 = val;
+    __isset.o4 = true;
   }
   void __set_o5(const UnknownPartitionException& val) {
     o5 = val;
+    __isset.o5 = true;
   }
   void __set_o6(const InvalidPartitionException& val) {
     o6 = val;
+    __isset.o6 = true;
   }
   bool operator == (const ThriftHiveMetastore_isPartitionMarkedForEvent_result & rhs) const
@@ -10202,10 +10586,12 @@ class ThriftHiveMetastore_add_index_args {
   void __set_new_index(const Index& val) {
     new_index = val;
+    __isset.new_index = true;
   }
   void __set_index_table(const Table& val) {
     index_table = val;
+    __isset.index_table = true;
   }
   bool operator == (const ThriftHiveMetastore_add_index_args & rhs) const
@@ -10266,18 +10652,22 @@ class ThriftHiveMetastore_add_index_result {
   void __set_success(const Index& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const InvalidObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const AlreadyExistsException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   bool operator == (const ThriftHiveMetastore_add_index_result & rhs) const
@@ -10353,18 +10743,22 @@ class ThriftHiveMetastore_alter_index_args {
   void __set_dbname(const std::string& val) {
     dbname = val;
+    __isset.dbname = true;
   }
   void __set_base_tbl_name(const std::string& val) {
     base_tbl_name = val;
+    __isset.base_tbl_name = true;
   }
   void __set_idx_name(const std::string& val) {
     idx_name = val;
+    __isset.idx_name = true;
   }
   void __set_new_idx(const Index& val) {
     new_idx = val;
+    __isset.new_idx = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_index_args & rhs) const
@@ -10427,10 +10821,12 @@ class ThriftHiveMetastore_alter_index_result {
   void __set_o1(const InvalidOperationException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_alter_index_result & rhs) const
@@ -10498,18 +10894,22 @@ class ThriftHiveMetastore_drop_index_by_name_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_index_name(const std::string& val) {
     index_name = val;
+    __isset.index_name = true;
   }
   void __set_deleteData(const bool val) {
     deleteData = val;
+    __isset.deleteData = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_index_by_name_args & rhs) const
@@ -10574,14 +10974,17 @@ class ThriftHiveMetastore_drop_index_by_name_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_drop_index_by_name_result & rhs) const
@@ -10651,14 +11054,17 @@ class ThriftHiveMetastore_get_index_by_name_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_index_name(const std::string& val) {
     index_name = val;
+    __isset.index_name = true;
   }
   bool operator == (const ThriftHiveMetastore_get_index_by_name_args & rhs) const
@@ -10720,14 +11126,17 @@ class ThriftHiveMetastore_get_index_by_name_result {
   void __set_success(const Index& val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const MetaException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const NoSuchObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_index_by_name_result & rhs) const
@@ -10797,14 +11206,17 @@ class ThriftHiveMetastore_get_indexes_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_max_indexes(const int16_t val) {
     max_indexes = val;
+    __isset.max_indexes = true;
   }
   bool operator == (const ThriftHiveMetastore_get_indexes_args & rhs) const
@@ -10866,14 +11278,17 @@ class ThriftHiveMetastore_get_indexes_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_indexes_result & rhs) const
@@ -10943,14 +11358,17 @@ class ThriftHiveMetastore_get_index_names_args {
   void __set_db_name(const std::string& val) {
     db_name = val;
+    __isset.db_name = true;
   }
   void __set_tbl_name(const std::string& val) {
     tbl_name = val;
+    __isset.tbl_name = true;
   }
   void __set_max_indexes(const int16_t val) {
     max_indexes = val;
+    __isset.max_indexes = true;
   }
   bool operator == (const ThriftHiveMetastore_get_index_names_args & rhs) const
@@ -11010,10 +11428,12 @@ class ThriftHiveMetastore_get_index_names_result {
   void __set_success(const std::vector & val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o2(const MetaException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   bool operator == (const ThriftHiveMetastore_get_index_names_result & rhs) const
@@ -11075,6 +11495,7 @@ class ThriftHiveMetastore_update_table_column_statistics_args {
   void __set_stats_obj(const ColumnStatistics& val) {
     stats_obj = val;
+    __isset.stats_obj = true;
   }
   bool operator == (const ThriftHiveMetastore_update_table_column_statistics_args & rhs) const
@@ -11134,22 +11555,27 @@ class ThriftHiveMetastore_update_table_column_statistics_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const InvalidInputException& val) {
     o4 = val;
+    __isset.o4 = true;
   }
   bool operator == (const ThriftHiveMetastore_update_table_column_statistics_result & rhs) const
@@ -11223,6 +11649,7 @@ class ThriftHiveMetastore_update_partition_column_statistics_args {
   void __set_stats_obj(const ColumnStatistics& val) {
     stats_obj = val;
+    __isset.stats_obj = true;
   }
   bool operator == (const ThriftHiveMetastore_update_partition_column_statistics_args & rhs) const
@@ -11282,22 +11709,27 @@ class ThriftHiveMetastore_update_partition_column_statistics_result {
   void __set_success(const bool val) {
     success = val;
+    __isset.success = true;
   }
   void __set_o1(const NoSuchObjectException& val) {
     o1 = val;
+    __isset.o1 = true;
   }
   void __set_o2(const InvalidObjectException& val) {
     o2 = val;
+    __isset.o2 = true;
   }
   void __set_o3(const MetaException& val) {
     o3 = val;
+    __isset.o3 = true;
   }
   void __set_o4(const InvalidInputException&
val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_update_partition_column_statistics_result & rhs) const @@ -11375,14 +11807,17 @@ class ThriftHiveMetastore_get_table_column_statistics_args { void __set_db_name(const std::string& val) { db_name = val; + __isset.db_name = true; } void __set_tbl_name(const std::string& val) { tbl_name = val; + __isset.tbl_name = true; } void __set_col_name(const std::string& val) { col_name = val; + __isset.col_name = true; } bool operator == (const ThriftHiveMetastore_get_table_column_statistics_args & rhs) const @@ -11448,22 +11883,27 @@ class ThriftHiveMetastore_get_table_column_statistics_result { void __set_success(const ColumnStatistics& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const InvalidInputException& val) { o3 = val; + __isset.o3 = true; } void __set_o4(const InvalidObjectException& val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_get_table_column_statistics_result & rhs) const @@ -11543,18 +11983,22 @@ class ThriftHiveMetastore_get_partition_column_statistics_args { void __set_db_name(const std::string& val) { db_name = val; + __isset.db_name = true; } void __set_tbl_name(const std::string& val) { tbl_name = val; + __isset.tbl_name = true; } void __set_part_name(const std::string& val) { part_name = val; + __isset.part_name = true; } void __set_col_name(const std::string& val) { col_name = val; + __isset.col_name = true; } bool operator == (const ThriftHiveMetastore_get_partition_column_statistics_args & rhs) const @@ -11623,22 +12067,27 @@ class ThriftHiveMetastore_get_partition_column_statistics_result { void __set_success(const ColumnStatistics& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const InvalidInputException& val) { o3 = val; + __isset.o3 = true; } void __set_o4(const InvalidObjectException& val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_get_partition_column_statistics_result & rhs) const @@ -11712,6 +12161,7 @@ class ThriftHiveMetastore_get_table_statistics_req_args { void __set_request(const TableStatsRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_get_table_statistics_req_args & rhs) const @@ -11767,14 +12217,17 @@ class ThriftHiveMetastore_get_table_statistics_req_result { void __set_success(const TableStatsResult& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_get_table_statistics_req_result & rhs) const @@ -11840,6 +12293,7 @@ class ThriftHiveMetastore_get_partitions_statistics_req_args { void __set_request(const PartitionsStatsRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_get_partitions_statistics_req_args & rhs) const @@ -11895,14 +12349,17 @@ class ThriftHiveMetastore_get_partitions_statistics_req_result { void __set_success(const PartitionsStatsResult& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = 
val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_get_partitions_statistics_req_result & rhs) const @@ -11968,6 +12425,7 @@ class ThriftHiveMetastore_get_aggr_stats_for_args { void __set_request(const PartitionsStatsRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_get_aggr_stats_for_args & rhs) const @@ -12023,14 +12481,17 @@ class ThriftHiveMetastore_get_aggr_stats_for_result { void __set_success(const AggrStats& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_get_aggr_stats_for_result & rhs) const @@ -12096,6 +12557,7 @@ class ThriftHiveMetastore_set_aggr_stats_for_args { void __set_request(const SetPartitionsStatsRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_set_aggr_stats_for_args & rhs) const @@ -12155,22 +12617,27 @@ class ThriftHiveMetastore_set_aggr_stats_for_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const InvalidObjectException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const MetaException& val) { o3 = val; + __isset.o3 = true; } void __set_o4(const InvalidInputException& val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_set_aggr_stats_for_result & rhs) const @@ -12250,18 +12717,22 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { void __set_db_name(const std::string& val) { db_name = val; + __isset.db_name = true; } void __set_tbl_name(const std::string& val) { tbl_name = val; + __isset.tbl_name = true; } void __set_part_name(const std::string& val) { part_name = val; + __isset.part_name = true; } void __set_col_name(const std::string& val) { col_name = val; + __isset.col_name = true; } bool operator == (const ThriftHiveMetastore_delete_partition_column_statistics_args & rhs) const @@ -12330,22 +12801,27 @@ class ThriftHiveMetastore_delete_partition_column_statistics_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const InvalidObjectException& val) { o3 = val; + __isset.o3 = true; } void __set_o4(const InvalidInputException& val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_delete_partition_column_statistics_result & rhs) const @@ -12423,14 +12899,17 @@ class ThriftHiveMetastore_delete_table_column_statistics_args { void __set_db_name(const std::string& val) { db_name = val; + __isset.db_name = true; } void __set_tbl_name(const std::string& val) { tbl_name = val; + __isset.tbl_name = true; } void __set_col_name(const std::string& val) { col_name = val; + __isset.col_name = true; } bool operator == (const ThriftHiveMetastore_delete_table_column_statistics_args & rhs) const @@ -12496,22 +12975,27 @@ class ThriftHiveMetastore_delete_table_column_statistics_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void 
__set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const InvalidObjectException& val) { o3 = val; + __isset.o3 = true; } void __set_o4(const InvalidInputException& val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_delete_table_column_statistics_result & rhs) const @@ -12585,6 +13069,7 @@ class ThriftHiveMetastore_create_function_args { void __set_func(const Function& val) { func = val; + __isset.func = true; } bool operator == (const ThriftHiveMetastore_create_function_args & rhs) const @@ -12642,18 +13127,22 @@ class ThriftHiveMetastore_create_function_result { void __set_o1(const AlreadyExistsException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const InvalidObjectException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const MetaException& val) { o3 = val; + __isset.o3 = true; } void __set_o4(const NoSuchObjectException& val) { o4 = val; + __isset.o4 = true; } bool operator == (const ThriftHiveMetastore_create_function_result & rhs) const @@ -12725,10 +13214,12 @@ class ThriftHiveMetastore_drop_function_args { void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_funcName(const std::string& val) { funcName = val; + __isset.funcName = true; } bool operator == (const ThriftHiveMetastore_drop_function_args & rhs) const @@ -12785,10 +13276,12 @@ class ThriftHiveMetastore_drop_function_result { void __set_o1(const NoSuchObjectException& val) { o1 = val; + __isset.o1 = true; } void __set_o3(const MetaException& val) { o3 = val; + __isset.o3 = true; } bool operator == (const ThriftHiveMetastore_drop_function_result & rhs) const @@ -12854,14 +13347,17 @@ class ThriftHiveMetastore_alter_function_args { void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_funcName(const std::string& val) { funcName = val; + __isset.funcName = true; } void __set_newFunc(const Function& val) { newFunc = val; + __isset.newFunc = true; } bool operator == (const ThriftHiveMetastore_alter_function_args & rhs) const @@ -12921,10 +13417,12 @@ class ThriftHiveMetastore_alter_function_result { void __set_o1(const InvalidOperationException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const MetaException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_alter_function_result & rhs) const @@ -12988,10 +13486,12 @@ class ThriftHiveMetastore_get_functions_args { void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_pattern(const std::string& val) { pattern = val; + __isset.pattern = true; } bool operator == (const ThriftHiveMetastore_get_functions_args & rhs) const @@ -13048,10 +13548,12 @@ class ThriftHiveMetastore_get_functions_result { void __set_success(const std::vector & val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_get_functions_result & rhs) const @@ -13115,10 +13617,12 @@ class ThriftHiveMetastore_get_function_args { void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_funcName(const std::string& val) { funcName = val; + __isset.funcName = true; } bool operator == (const ThriftHiveMetastore_get_function_args & rhs) const @@ -13177,14 +13681,17 @@ class ThriftHiveMetastore_get_function_result { void __set_success(const Function& val) { success = val; + __isset.success = true; } void __set_o1(const 
MetaException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const NoSuchObjectException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_get_function_result & rhs) const @@ -13250,6 +13757,7 @@ class ThriftHiveMetastore_create_role_args { void __set_role(const Role& val) { role = val; + __isset.role = true; } bool operator == (const ThriftHiveMetastore_create_role_args & rhs) const @@ -13303,10 +13811,12 @@ class ThriftHiveMetastore_create_role_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_create_role_result & rhs) const @@ -13368,6 +13878,7 @@ class ThriftHiveMetastore_drop_role_args { void __set_role_name(const std::string& val) { role_name = val; + __isset.role_name = true; } bool operator == (const ThriftHiveMetastore_drop_role_args & rhs) const @@ -13421,10 +13932,12 @@ class ThriftHiveMetastore_drop_role_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_drop_role_result & rhs) const @@ -13525,10 +14038,12 @@ class ThriftHiveMetastore_get_role_names_result { void __set_success(const std::vector & val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_get_role_names_result & rhs) const @@ -13600,26 +14115,32 @@ class ThriftHiveMetastore_grant_role_args { void __set_role_name(const std::string& val) { role_name = val; + __isset.role_name = true; } void __set_principal_name(const std::string& val) { principal_name = val; + __isset.principal_name = true; } void __set_principal_type(const PrincipalType::type val) { principal_type = val; + __isset.principal_type = true; } void __set_grantor(const std::string& val) { grantor = val; + __isset.grantor = true; } void __set_grantorType(const PrincipalType::type val) { grantorType = val; + __isset.grantorType = true; } void __set_grant_option(const bool val) { grant_option = val; + __isset.grant_option = true; } bool operator == (const ThriftHiveMetastore_grant_role_args & rhs) const @@ -13688,10 +14209,12 @@ class ThriftHiveMetastore_grant_role_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_grant_role_result & rhs) const @@ -13757,14 +14280,17 @@ class ThriftHiveMetastore_revoke_role_args { void __set_role_name(const std::string& val) { role_name = val; + __isset.role_name = true; } void __set_principal_name(const std::string& val) { principal_name = val; + __isset.principal_name = true; } void __set_principal_type(const PrincipalType::type val) { principal_type = val; + __isset.principal_type = true; } bool operator == (const ThriftHiveMetastore_revoke_role_args & rhs) const @@ -13824,10 +14350,12 @@ class ThriftHiveMetastore_revoke_role_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_revoke_role_result & rhs) const @@ -13891,10 +14419,12 @@ class ThriftHiveMetastore_list_roles_args { void __set_principal_name(const std::string& val) { principal_name = val; + 
__isset.principal_name = true; } void __set_principal_type(const PrincipalType::type val) { principal_type = val; + __isset.principal_type = true; } bool operator == (const ThriftHiveMetastore_list_roles_args & rhs) const @@ -13951,10 +14481,12 @@ class ThriftHiveMetastore_list_roles_result { void __set_success(const std::vector & val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_list_roles_result & rhs) const @@ -14016,6 +14548,7 @@ class ThriftHiveMetastore_grant_revoke_role_args { void __set_request(const GrantRevokeRoleRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_grant_revoke_role_args & rhs) const @@ -14069,10 +14602,12 @@ class ThriftHiveMetastore_grant_revoke_role_result { void __set_success(const GrantRevokeRoleResponse& val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_grant_revoke_role_result & rhs) const @@ -14134,6 +14669,7 @@ class ThriftHiveMetastore_get_principals_in_role_args { void __set_request(const GetPrincipalsInRoleRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_get_principals_in_role_args & rhs) const @@ -14187,10 +14723,12 @@ class ThriftHiveMetastore_get_principals_in_role_result { void __set_success(const GetPrincipalsInRoleResponse& val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_get_principals_in_role_result & rhs) const @@ -14252,6 +14790,7 @@ class ThriftHiveMetastore_get_role_grants_for_principal_args { void __set_request(const GetRoleGrantsForPrincipalRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_get_role_grants_for_principal_args & rhs) const @@ -14305,10 +14844,12 @@ class ThriftHiveMetastore_get_role_grants_for_principal_result { void __set_success(const GetRoleGrantsForPrincipalResponse& val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_get_role_grants_for_principal_result & rhs) const @@ -14374,14 +14915,17 @@ class ThriftHiveMetastore_get_privilege_set_args { void __set_hiveObject(const HiveObjectRef& val) { hiveObject = val; + __isset.hiveObject = true; } void __set_user_name(const std::string& val) { user_name = val; + __isset.user_name = true; } void __set_group_names(const std::vector & val) { group_names = val; + __isset.group_names = true; } bool operator == (const ThriftHiveMetastore_get_privilege_set_args & rhs) const @@ -14441,10 +14985,12 @@ class ThriftHiveMetastore_get_privilege_set_result { void __set_success(const PrincipalPrivilegeSet& val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_get_privilege_set_result & rhs) const @@ -14510,14 +15056,17 @@ class ThriftHiveMetastore_list_privileges_args { void __set_principal_name(const std::string& val) { principal_name = val; + __isset.principal_name = true; } void __set_principal_type(const PrincipalType::type val) { principal_type = val; + __isset.principal_type = true; } void __set_hiveObject(const HiveObjectRef& val) { hiveObject = val; + 
__isset.hiveObject = true; } bool operator == (const ThriftHiveMetastore_list_privileges_args & rhs) const @@ -14577,10 +15126,12 @@ class ThriftHiveMetastore_list_privileges_result { void __set_success(const std::vector & val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_list_privileges_result & rhs) const @@ -14642,6 +15193,7 @@ class ThriftHiveMetastore_grant_privileges_args { void __set_privileges(const PrivilegeBag& val) { privileges = val; + __isset.privileges = true; } bool operator == (const ThriftHiveMetastore_grant_privileges_args & rhs) const @@ -14695,10 +15247,12 @@ class ThriftHiveMetastore_grant_privileges_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_grant_privileges_result & rhs) const @@ -14760,6 +15314,7 @@ class ThriftHiveMetastore_revoke_privileges_args { void __set_privileges(const PrivilegeBag& val) { privileges = val; + __isset.privileges = true; } bool operator == (const ThriftHiveMetastore_revoke_privileges_args & rhs) const @@ -14813,10 +15368,12 @@ class ThriftHiveMetastore_revoke_privileges_result { void __set_success(const bool val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_revoke_privileges_result & rhs) const @@ -14878,6 +15435,7 @@ class ThriftHiveMetastore_grant_revoke_privileges_args { void __set_request(const GrantRevokePrivilegeRequest& val) { request = val; + __isset.request = true; } bool operator == (const ThriftHiveMetastore_grant_revoke_privileges_args & rhs) const @@ -14931,10 +15489,12 @@ class ThriftHiveMetastore_grant_revoke_privileges_result { void __set_success(const GrantRevokePrivilegeResponse& val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_grant_revoke_privileges_result & rhs) const @@ -14998,10 +15558,12 @@ class ThriftHiveMetastore_set_ugi_args { void __set_user_name(const std::string& val) { user_name = val; + __isset.user_name = true; } void __set_group_names(const std::vector & val) { group_names = val; + __isset.group_names = true; } bool operator == (const ThriftHiveMetastore_set_ugi_args & rhs) const @@ -15058,10 +15620,12 @@ class ThriftHiveMetastore_set_ugi_result { void __set_success(const std::vector & val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_set_ugi_result & rhs) const @@ -15125,10 +15689,12 @@ class ThriftHiveMetastore_get_delegation_token_args { void __set_token_owner(const std::string& val) { token_owner = val; + __isset.token_owner = true; } void __set_renewer_kerberos_principal_name(const std::string& val) { renewer_kerberos_principal_name = val; + __isset.renewer_kerberos_principal_name = true; } bool operator == (const ThriftHiveMetastore_get_delegation_token_args & rhs) const @@ -15185,10 +15751,12 @@ class ThriftHiveMetastore_get_delegation_token_result { void __set_success(const std::string& val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const 
ThriftHiveMetastore_get_delegation_token_result & rhs) const @@ -15250,6 +15818,7 @@ class ThriftHiveMetastore_renew_delegation_token_args { void __set_token_str_form(const std::string& val) { token_str_form = val; + __isset.token_str_form = true; } bool operator == (const ThriftHiveMetastore_renew_delegation_token_args & rhs) const @@ -15303,10 +15872,12 @@ class ThriftHiveMetastore_renew_delegation_token_result { void __set_success(const int64_t val) { success = val; + __isset.success = true; } void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_renew_delegation_token_result & rhs) const @@ -15368,6 +15939,7 @@ class ThriftHiveMetastore_cancel_delegation_token_args { void __set_token_str_form(const std::string& val) { token_str_form = val; + __isset.token_str_form = true; } bool operator == (const ThriftHiveMetastore_cancel_delegation_token_args & rhs) const @@ -15419,6 +15991,7 @@ class ThriftHiveMetastore_cancel_delegation_token_result { void __set_o1(const MetaException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_cancel_delegation_token_result & rhs) const @@ -15513,6 +16086,7 @@ class ThriftHiveMetastore_get_open_txns_result { void __set_success(const GetOpenTxnsResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_get_open_txns_result & rhs) const @@ -15607,6 +16181,7 @@ class ThriftHiveMetastore_get_open_txns_info_result { void __set_success(const GetOpenTxnsInfoResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_get_open_txns_info_result & rhs) const @@ -15664,6 +16239,7 @@ class ThriftHiveMetastore_open_txns_args { void __set_rqst(const OpenTxnRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_open_txns_args & rhs) const @@ -15715,6 +16291,7 @@ class ThriftHiveMetastore_open_txns_result { void __set_success(const OpenTxnsResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_open_txns_result & rhs) const @@ -15772,6 +16349,7 @@ class ThriftHiveMetastore_abort_txn_args { void __set_rqst(const AbortTxnRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_abort_txn_args & rhs) const @@ -15823,6 +16401,7 @@ class ThriftHiveMetastore_abort_txn_result { void __set_o1(const NoSuchTxnException& val) { o1 = val; + __isset.o1 = true; } bool operator == (const ThriftHiveMetastore_abort_txn_result & rhs) const @@ -15880,6 +16459,7 @@ class ThriftHiveMetastore_commit_txn_args { void __set_rqst(const CommitTxnRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_commit_txn_args & rhs) const @@ -15933,10 +16513,12 @@ class ThriftHiveMetastore_commit_txn_result { void __set_o1(const NoSuchTxnException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const TxnAbortedException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_commit_txn_result & rhs) const @@ -15998,6 +16580,7 @@ class ThriftHiveMetastore_lock_args { void __set_rqst(const LockRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_lock_args & rhs) const @@ -16053,14 +16636,17 @@ class ThriftHiveMetastore_lock_result { void __set_success(const LockResponse& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchTxnException& val) { o1 = val; + 
__isset.o1 = true; } void __set_o2(const TxnAbortedException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_lock_result & rhs) const @@ -16126,6 +16712,7 @@ class ThriftHiveMetastore_check_lock_args { void __set_rqst(const CheckLockRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_check_lock_args & rhs) const @@ -16183,18 +16770,22 @@ class ThriftHiveMetastore_check_lock_result { void __set_success(const LockResponse& val) { success = val; + __isset.success = true; } void __set_o1(const NoSuchTxnException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const TxnAbortedException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const NoSuchLockException& val) { o3 = val; + __isset.o3 = true; } bool operator == (const ThriftHiveMetastore_check_lock_result & rhs) const @@ -16264,6 +16855,7 @@ class ThriftHiveMetastore_unlock_args { void __set_rqst(const UnlockRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_unlock_args & rhs) const @@ -16317,10 +16909,12 @@ class ThriftHiveMetastore_unlock_result { void __set_o1(const NoSuchLockException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const TxnOpenException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_unlock_result & rhs) const @@ -16382,6 +16976,7 @@ class ThriftHiveMetastore_show_locks_args { void __set_rqst(const ShowLocksRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_show_locks_args & rhs) const @@ -16433,6 +17028,7 @@ class ThriftHiveMetastore_show_locks_result { void __set_success(const ShowLocksResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_show_locks_result & rhs) const @@ -16490,6 +17086,7 @@ class ThriftHiveMetastore_heartbeat_args { void __set_ids(const HeartbeatRequest& val) { ids = val; + __isset.ids = true; } bool operator == (const ThriftHiveMetastore_heartbeat_args & rhs) const @@ -16545,14 +17142,17 @@ class ThriftHiveMetastore_heartbeat_result { void __set_o1(const NoSuchLockException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const NoSuchTxnException& val) { o2 = val; + __isset.o2 = true; } void __set_o3(const TxnAbortedException& val) { o3 = val; + __isset.o3 = true; } bool operator == (const ThriftHiveMetastore_heartbeat_result & rhs) const @@ -16618,6 +17218,7 @@ class ThriftHiveMetastore_heartbeat_txn_range_args { void __set_txns(const HeartbeatTxnRangeRequest& val) { txns = val; + __isset.txns = true; } bool operator == (const ThriftHiveMetastore_heartbeat_txn_range_args & rhs) const @@ -16669,6 +17270,7 @@ class ThriftHiveMetastore_heartbeat_txn_range_result { void __set_success(const HeartbeatTxnRangeResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_heartbeat_txn_range_result & rhs) const @@ -16726,6 +17328,7 @@ class ThriftHiveMetastore_compact_args { void __set_rqst(const CompactionRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_compact_args & rhs) const @@ -16814,6 +17417,7 @@ class ThriftHiveMetastore_show_compact_args { void __set_rqst(const ShowCompactRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_show_compact_args & rhs) const @@ -16865,6 +17469,7 @@ class ThriftHiveMetastore_show_compact_result { void __set_success(const ShowCompactResponse& val) { 
success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_show_compact_result & rhs) const @@ -16922,6 +17527,7 @@ class ThriftHiveMetastore_add_dynamic_partitions_args { void __set_rqst(const AddDynamicPartitions& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_add_dynamic_partitions_args & rhs) const @@ -16975,10 +17581,12 @@ class ThriftHiveMetastore_add_dynamic_partitions_result { void __set_o1(const NoSuchTxnException& val) { o1 = val; + __isset.o1 = true; } void __set_o2(const TxnAbortedException& val) { o2 = val; + __isset.o2 = true; } bool operator == (const ThriftHiveMetastore_add_dynamic_partitions_result & rhs) const @@ -17040,6 +17648,7 @@ class ThriftHiveMetastore_get_next_notification_args { void __set_rqst(const NotificationEventRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_get_next_notification_args & rhs) const @@ -17091,6 +17700,7 @@ class ThriftHiveMetastore_get_next_notification_result { void __set_success(const NotificationEventResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_get_next_notification_result & rhs) const @@ -17185,6 +17795,7 @@ class ThriftHiveMetastore_get_current_notificationEventId_result { void __set_success(const CurrentNotificationEventId& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_get_current_notificationEventId_result & rhs) const @@ -17242,6 +17853,7 @@ class ThriftHiveMetastore_fire_listener_event_args { void __set_rqst(const FireEventRequest& val) { rqst = val; + __isset.rqst = true; } bool operator == (const ThriftHiveMetastore_fire_listener_event_args & rhs) const @@ -17293,6 +17905,7 @@ class ThriftHiveMetastore_fire_listener_event_result { void __set_success(const FireEventResponse& val) { success = val; + __isset.success = true; } bool operator == (const ThriftHiveMetastore_fire_listener_event_result & rhs) const diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 749c97a..dc51fa3 100644 --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -201,12 +201,15 @@ uint32_t Version::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Version::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Version"); + ++fcnt; xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->version); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->comments); xfer += oprot->writeFieldEnd(); @@ -284,16 +287,20 @@ uint32_t FieldSchema::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t FieldSchema::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("FieldSchema"); + ++fcnt; xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->type); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("comment", 
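Every hunk in the generated service classes above makes the same mechanical change: each `__set_X` setter now raises the corresponding `__isset` flag when a value is assigned explicitly, instead of only storing the value. A minimal self-contained sketch of the pattern (the `ExampleArgs` struct and its field are illustrative only, not part of the generated code):

```cpp
#include <iostream>
#include <string>

// Mirrors the companion __isset struct Thrift generates per type.
struct _ExampleArgs__isset {
  _ExampleArgs__isset() : part_name(false) {}
  bool part_name;
};

struct ExampleArgs {
  std::string part_name;
  _ExampleArgs__isset __isset;

  // Before this patch the setter only assigned the value; afterwards it
  // also records the assignment, so serialization and operator== can tell
  // an explicitly set empty value apart from a never-set one.
  void __set_part_name(const std::string& val) {
    part_name = val;
    __isset.part_name = true;
  }
};

int main() {
  ExampleArgs args;
  std::cout << args.__isset.part_name << "\n";  // 0: never set
  args.__set_part_name("ds=2015-01-01");
  std::cout << args.__isset.part_name << "\n";  // 1: explicitly set
}
```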
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 749c97a..dc51fa3 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -201,12 +201,15 @@ uint32_t Version::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Version::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Version");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->version);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->comments);
   xfer += oprot->writeFieldEnd();
@@ -284,16 +287,20 @@ uint32_t FieldSchema::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t FieldSchema::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("FieldSchema");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->type);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->comment);
   xfer += oprot->writeFieldEnd();
@@ -392,23 +399,28 @@ uint32_t Type::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Type::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Type");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
   if (this->__isset.type1) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("type1", ::apache::thrift::protocol::T_STRING, 2);
     xfer += oprot->writeString(this->type1);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.type2) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("type2", ::apache::thrift::protocol::T_STRING, 3);
     xfer += oprot->writeString(this->type2);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.fields) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("fields", ::apache::thrift::protocol::T_LIST, 4);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fields.size()));
@@ -526,20 +538,25 @@ uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("HiveObjectRef");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("objectType", ::apache::thrift::protocol::T_I32, 1);
   xfer += oprot->writeI32((int32_t)this->objectType);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->dbName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("objectName", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->objectName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("partValues", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partValues.size()));
@@ -552,6 +569,7 @@ uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) cons
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("columnName", ::apache::thrift::protocol::T_STRING, 5);
   xfer += oprot->writeString(this->columnName);
   xfer += oprot->writeFieldEnd();
@@ -650,24 +668,30 @@ uint32_t PrivilegeGrantInfo::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t PrivilegeGrantInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("PrivilegeGrantInfo");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("privilege", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->privilege);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32(this->createTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->grantor);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 4);
   xfer += oprot->writeI32((int32_t)this->grantorType);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 5);
   xfer += oprot->writeBool(this->grantOption);
   xfer += oprot->writeFieldEnd();
@@ -758,20 +782,25 @@ uint32_t HiveObjectPrivilege::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t HiveObjectPrivilege::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("HiveObjectPrivilege");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->hiveObject.write(oprot);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->principalName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 3);
   xfer += oprot->writeI32((int32_t)this->principalType);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantInfo", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += this->grantInfo.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -847,8 +876,10 @@ uint32_t PrivilegeBag::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t PrivilegeBag::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("PrivilegeBag");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->privileges.size()));
@@ -1014,8 +1045,10 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro
 uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("PrincipalPrivilegeSet");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("userPrivileges", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->userPrivileges.size()));
@@ -1037,6 +1070,7 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("groupPrivileges", ::apache::thrift::protocol::T_MAP, 2);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->groupPrivileges.size()));
@@ -1058,6 +1092,7 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("rolePrivileges", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->rolePrivileges.size()));
@@ -1155,17 +1190,21 @@ uint32_t GrantRevokePrivilegeRequest::read(::apache::thrift::protocol::TProtocol
 uint32_t GrantRevokePrivilegeRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GrantRevokePrivilegeRequest");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("requestType", ::apache::thrift::protocol::T_I32, 1);
   xfer += oprot->writeI32((int32_t)this->requestType);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 2);
   xfer += this->privileges.write(oprot);
   xfer += oprot->writeFieldEnd();
   if (this->__isset.revokeGrantOption) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("revokeGrantOption", ::apache::thrift::protocol::T_BOOL, 3);
     xfer += oprot->writeBool(this->revokeGrantOption);
     xfer += oprot->writeFieldEnd();
@@ -1228,9 +1267,11 @@ uint32_t GrantRevokePrivilegeResponse::read(::apache::thrift::protocol::TProtoco
 uint32_t GrantRevokePrivilegeResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GrantRevokePrivilegeResponse");
   if (this->__isset.success) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 1);
     xfer += oprot->writeBool(this->success);
     xfer += oprot->writeFieldEnd();
@@ -1307,16 +1348,20 @@ uint32_t Role::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Role::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Role");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->roleName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32(this->createTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("ownerName", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->ownerName);
   xfer += oprot->writeFieldEnd();
@@ -1431,32 +1476,40 @@ uint32_t RolePrincipalGrant::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t RolePrincipalGrant::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("RolePrincipalGrant");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->roleName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->principalName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 3);
   xfer += oprot->writeI32((int32_t)this->principalType);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 4);
   xfer += oprot->writeBool(this->grantOption);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantTime", ::apache::thrift::protocol::T_I32, 5);
   xfer += oprot->writeI32(this->grantTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantorName", ::apache::thrift::protocol::T_STRING, 6);
   xfer += oprot->writeString(this->grantorName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("grantorPrincipalType", ::apache::thrift::protocol::T_I32, 7);
   xfer += oprot->writeI32((int32_t)this->grantorPrincipalType);
   xfer += oprot->writeFieldEnd();
@@ -1539,12 +1592,15 @@ uint32_t GetRoleGrantsForPrincipalRequest::read(::apache::thrift::protocol::TPro
 uint32_t GetRoleGrantsForPrincipalRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GetRoleGrantsForPrincipalRequest");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->principal_name);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32((int32_t)this->principal_type);
   xfer += oprot->writeFieldEnd();
@@ -1620,8 +1676,10 @@ uint32_t GetRoleGrantsForPrincipalResponse::read(::apache::thrift::protocol::TPr
 uint32_t GetRoleGrantsForPrincipalResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GetRoleGrantsForPrincipalResponse");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->principalGrants.size()));
@@ -1692,8 +1750,10 @@ uint32_t GetPrincipalsInRoleRequest::read(::apache::thrift::protocol::TProtocol*
 uint32_t GetPrincipalsInRoleRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GetPrincipalsInRoleRequest");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->roleName);
   xfer += oprot->writeFieldEnd();
@@ -1768,8 +1828,10 @@ uint32_t GetPrincipalsInRoleResponse::read(::apache::thrift::protocol::TProtocol
 uint32_t GetPrincipalsInRoleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GetPrincipalsInRoleResponse");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->principalGrants.size()));
@@ -1891,35 +1953,43 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr
 uint32_t GrantRevokeRoleRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GrantRevokeRoleRequest");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("requestType", ::apache::thrift::protocol::T_I32, 1);
   xfer += oprot->writeI32((int32_t)this->requestType);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->roleName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->principalName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 4);
   xfer += oprot->writeI32((int32_t)this->principalType);
   xfer += oprot->writeFieldEnd();
   if (this->__isset.grantor) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 5);
     xfer += oprot->writeString(this->grantor);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.grantorType) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 6);
     xfer += oprot->writeI32((int32_t)this->grantorType);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.grantOption) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 7);
     xfer += oprot->writeBool(this->grantOption);
     xfer += oprot->writeFieldEnd();
@@ -1986,9 +2056,11 @@ uint32_t GrantRevokeRoleResponse::read(::apache::thrift::protocol::TProtocol* ip
 uint32_t GrantRevokeRoleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("GrantRevokeRoleResponse");
   if (this->__isset.success) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 1);
     xfer += oprot->writeBool(this->success);
     xfer += oprot->writeFieldEnd();
@@ -2114,20 +2186,25 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Database");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->description);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("locationUri", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->locationUri);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
@@ -2142,16 +2219,19 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldEnd();
   if (this->__isset.privileges) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 5);
     xfer += this->privileges.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.ownerName) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("ownerName", ::apache::thrift::protocol::T_STRING, 6);
     xfer += oprot->writeString(this->ownerName);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.ownerType) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("ownerType", ::apache::thrift::protocol::T_I32, 7);
     xfer += oprot->writeI32((int32_t)this->ownerType);
     xfer += oprot->writeFieldEnd();
@@ -2249,16 +2329,20 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("SerDeInfo");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->name);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("serializationLib", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->serializationLib);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
@@ -2338,12 +2422,15 @@ uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Order::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Order");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("col", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->col);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("order", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32(this->order);
   xfer += oprot->writeFieldEnd();
@@ -2484,8 +2571,10 @@
 uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("SkewedInfo");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColNames.size()));
@@ -2498,6 +2587,7 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->skewedColValues.size()));
@@ -2518,6 +2608,7 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColValueLocationMaps.size()));
@@ -2736,8 +2827,10 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("StorageDescriptor");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size()));
@@ -2750,30 +2843,37 @@
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("location", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->location);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("inputFormat", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->inputFormat);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("outputFormat", ::apache::thrift::protocol::T_STRING, 4);
   xfer += oprot->writeString(this->outputFormat);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("compressed", ::apache::thrift::protocol::T_BOOL, 5);
   xfer += oprot->writeBool(this->compressed);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("numBuckets", ::apache::thrift::protocol::T_I32, 6);
   xfer += oprot->writeI32(this->numBuckets);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("serdeInfo", ::apache::thrift::protocol::T_STRUCT, 7);
   xfer += this->serdeInfo.write(oprot);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->bucketCols.size()));
@@ -2786,6 +2886,7 @@
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->sortCols.size()));
@@ -2798,6 +2899,7 @@
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
@@ -2812,11 +2914,13 @@
   xfer += oprot->writeFieldEnd();
   if (this->__isset.skewedInfo) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("skewedInfo", ::apache::thrift::protocol::T_STRUCT, 11);
     xfer += this->skewedInfo.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.storedAsSubDirectories) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("storedAsSubDirectories", ::apache::thrift::protocol::T_BOOL, 12);
     xfer += oprot->writeBool(this->storedAsSubDirectories);
     xfer += oprot->writeFieldEnd();
@@ -3019,36 +3123,45 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Table");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->tableName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->dbName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("owner", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->owner);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 4);
   xfer += oprot->writeI32(this->createTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 5);
   xfer += oprot->writeI32(this->lastAccessTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("retention", ::apache::thrift::protocol::T_I32, 6);
   xfer += oprot->writeI32(this->retention);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 7);
   xfer += this->sd.write(oprot);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size()));
@@ -3061,6 +3174,7 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
@@ -3074,24 +3188,29 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("viewOriginalText", ::apache::thrift::protocol::T_STRING, 10);
   xfer += oprot->writeString(this->viewOriginalText);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("viewExpandedText", ::apache::thrift::protocol::T_STRING, 11);
   xfer += oprot->writeString(this->viewExpandedText);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 12);
   xfer += oprot->writeString(this->tableType);
   xfer += oprot->writeFieldEnd();
   if (this->__isset.privileges) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 13);
     xfer += this->privileges.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.temporary) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("temporary", ::apache::thrift::protocol::T_BOOL, 14);
     xfer += oprot->writeBool(this->temporary);
     xfer += oprot->writeFieldEnd();
@@ -3248,8 +3367,10 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("Partition");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
@@ -3262,26 +3383,32 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->dbName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->tableName);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 4);
   xfer += oprot->writeI32(this->createTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 5);
   xfer += oprot->writeI32(this->lastAccessTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 6);
   xfer += this->sd.write(oprot);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
@@ -3296,6 +3423,7 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldEnd();
   if (this->__isset.privileges) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 8);
     xfer += this->privileges.write(oprot);
     xfer += oprot->writeFieldEnd();
@@ -3430,8 +3558,10 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("PartitionWithoutSD");
+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
@@ -3444,18 +3574,22 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
   }
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32(this->createTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 3);
   xfer += oprot->writeI32(this->lastAccessTime);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("relativePath", ::apache::thrift::protocol::T_STRING, 4);
   xfer += oprot->writeString(this->relativePath);
   xfer += oprot->writeFieldEnd();
+  ++fcnt;
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5);
   {
     xfer +=
oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); @@ -3470,6 +3604,7 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldEnd(); if (this->__isset.privileges) { + ++fcnt; xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 6); xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); @@ -3555,8 +3690,10 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionSpecWithSharedSD"); + ++fcnt; xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); @@ -3569,6 +3706,7 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->sd.write(oprot); xfer += oprot->writeFieldEnd(); @@ -3642,8 +3780,10 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionListComposingSpec"); + ++fcnt; xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); @@ -3744,26 +3884,32 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionSpec"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("rootPath", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->rootPath); xfer += oprot->writeFieldEnd(); if (this->__isset.sharedSDPartitionSpec) { + ++fcnt; xfer += oprot->writeFieldBegin("sharedSDPartitionSpec", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->sharedSDPartitionSpec.write(oprot); xfer += oprot->writeFieldEnd(); } if (this->__isset.partitionList) { + ++fcnt; xfer += oprot->writeFieldBegin("partitionList", ::apache::thrift::protocol::T_STRUCT, 5); xfer += this->partitionList.write(oprot); xfer += oprot->writeFieldEnd(); @@ -3915,40 +4061,50 @@ uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Index"); + ++fcnt; xfer += oprot->writeFieldBegin("indexName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->indexName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("indexHandlerClass", ::apache::thrift::protocol::T_STRING, 2); xfer += 
oprot->writeString(this->indexHandlerClass); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("origTableName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->origTableName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32(this->createTime); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 6); xfer += oprot->writeI32(this->lastAccessTime); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("indexTableName", ::apache::thrift::protocol::T_STRING, 7); xfer += oprot->writeString(this->indexTableName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 8); xfer += this->sd.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); @@ -3962,6 +4118,7 @@ uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("deferredRebuild", ::apache::thrift::protocol::T_BOOL, 10); xfer += oprot->writeBool(this->deferredRebuild); xfer += oprot->writeFieldEnd(); @@ -4056,16 +4213,20 @@ uint32_t BooleanColumnStatsData::read(::apache::thrift::protocol::TProtocol* ipr uint32_t BooleanColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("BooleanColumnStatsData"); + ++fcnt; xfer += oprot->writeFieldBegin("numTrues", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->numTrues); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numFalses", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->numFalses); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->numNulls); xfer += oprot->writeFieldEnd(); @@ -4157,22 +4318,27 @@ uint32_t DoubleColumnStatsData::read(::apache::thrift::protocol::TProtocol* ipro uint32_t DoubleColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("DoubleColumnStatsData"); if (this->__isset.lowValue) { + ++fcnt; xfer += oprot->writeFieldBegin("lowValue", ::apache::thrift::protocol::T_DOUBLE, 1); xfer += oprot->writeDouble(this->lowValue); xfer += oprot->writeFieldEnd(); } if (this->__isset.highValue) { + ++fcnt; xfer += oprot->writeFieldBegin("highValue", ::apache::thrift::protocol::T_DOUBLE, 2); xfer += oprot->writeDouble(this->highValue); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->numNulls); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4); xfer += oprot->writeI64(this->numDVs); xfer += oprot->writeFieldEnd(); @@ -4266,22 +4432,27 @@ uint32_t 
LongColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t LongColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("LongColumnStatsData"); if (this->__isset.lowValue) { + ++fcnt; xfer += oprot->writeFieldBegin("lowValue", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lowValue); xfer += oprot->writeFieldEnd(); } if (this->__isset.highValue) { + ++fcnt; xfer += oprot->writeFieldBegin("highValue", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->highValue); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->numNulls); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4); xfer += oprot->writeI64(this->numDVs); xfer += oprot->writeFieldEnd(); @@ -4381,20 +4552,25 @@ uint32_t StringColumnStatsData::read(::apache::thrift::protocol::TProtocol* ipro uint32_t StringColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("StringColumnStatsData"); + ++fcnt; xfer += oprot->writeFieldBegin("maxColLen", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->maxColLen); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("avgColLen", ::apache::thrift::protocol::T_DOUBLE, 2); xfer += oprot->writeDouble(this->avgColLen); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->numNulls); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4); xfer += oprot->writeI64(this->numDVs); xfer += oprot->writeFieldEnd(); @@ -4482,16 +4658,20 @@ uint32_t BinaryColumnStatsData::read(::apache::thrift::protocol::TProtocol* ipro uint32_t BinaryColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("BinaryColumnStatsData"); + ++fcnt; xfer += oprot->writeFieldBegin("maxColLen", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->maxColLen); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("avgColLen", ::apache::thrift::protocol::T_DOUBLE, 2); xfer += oprot->writeDouble(this->avgColLen); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->numNulls); xfer += oprot->writeFieldEnd(); @@ -4567,12 +4747,15 @@ uint32_t Decimal::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Decimal::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Decimal"); + ++fcnt; xfer += oprot->writeFieldBegin("unscaled", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeBinary(this->unscaled); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("scale", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->scale); xfer += oprot->writeFieldEnd(); @@ -4663,22 +4846,27 @@ uint32_t DecimalColumnStatsData::read(::apache::thrift::protocol::TProtocol* ipr uint32_t DecimalColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + 
uint32_t fcnt = 0; xfer += oprot->writeStructBegin("DecimalColumnStatsData"); if (this->__isset.lowValue) { + ++fcnt; xfer += oprot->writeFieldBegin("lowValue", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->lowValue.write(oprot); xfer += oprot->writeFieldEnd(); } if (this->__isset.highValue) { + ++fcnt; xfer += oprot->writeFieldBegin("highValue", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->highValue.write(oprot); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3); xfer += oprot->writeI64(this->numNulls); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4); xfer += oprot->writeI64(this->numDVs); xfer += oprot->writeFieldEnd(); @@ -4782,32 +4970,48 @@ uint32_t ColumnStatisticsData::read(::apache::thrift::protocol::TProtocol* iprot uint32_t ColumnStatisticsData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ColumnStatisticsData"); - xfer += oprot->writeFieldBegin("booleanStats", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->booleanStats.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("longStats", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->longStats.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("doubleStats", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->doubleStats.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("stringStats", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->stringStats.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("binaryStats", ::apache::thrift::protocol::T_STRUCT, 5); - xfer += this->binaryStats.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("decimalStats", ::apache::thrift::protocol::T_STRUCT, 6); - xfer += this->decimalStats.write(oprot); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.booleanStats) { + ++fcnt; + xfer += oprot->writeFieldBegin("booleanStats", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->booleanStats.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.longStats) { + ++fcnt; + xfer += oprot->writeFieldBegin("longStats", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->longStats.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.doubleStats) { + ++fcnt; + xfer += oprot->writeFieldBegin("doubleStats", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->doubleStats.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.stringStats) { + ++fcnt; + xfer += oprot->writeFieldBegin("stringStats", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->stringStats.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.binaryStats) { + ++fcnt; + xfer += oprot->writeFieldBegin("binaryStats", ::apache::thrift::protocol::T_STRUCT, 5); + xfer += this->binaryStats.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.decimalStats) { + ++fcnt; + xfer += oprot->writeFieldBegin("decimalStats", ::apache::thrift::protocol::T_STRUCT, 6); + xfer += this->decimalStats.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (fcnt != 1) { + throw ::apache::thrift::TException("Union must have one set value."); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); 
return xfer; @@ -4894,16 +5098,20 @@ uint32_t ColumnStatisticsObj::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t ColumnStatisticsObj::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ColumnStatisticsObj"); + ++fcnt; xfer += oprot->writeFieldBegin("colName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->colName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("colType", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->colType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("statsData", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->statsData.write(oprot); xfer += oprot->writeFieldEnd(); @@ -5006,26 +5214,32 @@ uint32_t ColumnStatisticsDesc::read(::apache::thrift::protocol::TProtocol* iprot uint32_t ColumnStatisticsDesc::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ColumnStatisticsDesc"); + ++fcnt; xfer += oprot->writeFieldBegin("isTblLevel", ::apache::thrift::protocol::T_BOOL, 1); xfer += oprot->writeBool(this->isTblLevel); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); if (this->__isset.partName) { + ++fcnt; xfer += oprot->writeFieldBegin("partName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->partName); xfer += oprot->writeFieldEnd(); } if (this->__isset.lastAnalyzed) { + ++fcnt; xfer += oprot->writeFieldBegin("lastAnalyzed", ::apache::thrift::protocol::T_I64, 5); xfer += oprot->writeI64(this->lastAnalyzed); xfer += oprot->writeFieldEnd(); @@ -5116,12 +5330,15 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ColumnStatistics"); + ++fcnt; xfer += oprot->writeFieldBegin("statsDesc", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->statsDesc.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->statsObj.size())); @@ -5216,8 +5433,10 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("AggrStats"); + ++fcnt; xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); @@ -5230,6 +5449,7 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("partsFound", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->partsFound); xfer += oprot->writeFieldEnd(); @@ -5305,8 +5525,10 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* uint32_t 
SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("SetPartitionsStatsRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); @@ -5409,8 +5631,10 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Schema"); + ++fcnt; xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); @@ -5423,6 +5647,7 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); @@ -5508,8 +5733,10 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("EnvironmentContext"); + ++fcnt; xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); @@ -5605,8 +5832,10 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionsByExprResult"); + ++fcnt; xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); @@ -5619,6 +5848,7 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hasUnknownPartitions", ::apache::thrift::protocol::T_BOOL, 2); xfer += oprot->writeBool(this->hasUnknownPartitions); xfer += oprot->writeFieldEnd(); @@ -5720,26 +5950,32 @@ uint32_t PartitionsByExprRequest::read(::apache::thrift::protocol::TProtocol* ip uint32_t PartitionsByExprRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionsByExprRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("expr", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeBinary(this->expr); xfer += oprot->writeFieldEnd(); if (this->__isset.defaultPartitionName) { + ++fcnt; xfer += oprot->writeFieldBegin("defaultPartitionName", ::apache::thrift::protocol::T_STRING, 4); xfer += 
oprot->writeString(this->defaultPartitionName); xfer += oprot->writeFieldEnd(); } if (this->__isset.maxParts) { + ++fcnt; xfer += oprot->writeFieldBegin("maxParts", ::apache::thrift::protocol::T_I16, 5); xfer += oprot->writeI16(this->maxParts); xfer += oprot->writeFieldEnd(); @@ -5819,8 +6055,10 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TableStatsResult"); + ++fcnt; xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); @@ -5918,8 +6156,10 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionsStatsResult"); + ++fcnt; xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->partStats.size())); @@ -6033,16 +6273,20 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TableStatsRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); @@ -6172,16 +6416,20 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PartitionsStatsRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); @@ -6194,6 +6442,7 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); @@ -6276,9 +6525,11 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += 
oprot->writeStructBegin("AddPartitionsResult"); if (this->__isset.partitions) { + ++fcnt; xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); @@ -6403,16 +6654,20 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("AddPartitionsRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); @@ -6425,11 +6680,13 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("ifNotExists", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->ifNotExists); xfer += oprot->writeFieldEnd(); if (this->__isset.needResult) { + ++fcnt; xfer += oprot->writeFieldBegin("needResult", ::apache::thrift::protocol::T_BOOL, 5); xfer += oprot->writeBool(this->needResult); xfer += oprot->writeFieldEnd(); @@ -6506,9 +6763,11 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("DropPartitionsResult"); if (this->__isset.partitions) { + ++fcnt; xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); @@ -6588,13 +6847,16 @@ uint32_t DropPartitionsExpr::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t DropPartitionsExpr::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("DropPartitionsExpr"); + ++fcnt; xfer += oprot->writeFieldBegin("expr", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeBinary(this->expr); xfer += oprot->writeFieldEnd(); if (this->__isset.partArchiveLevel) { + ++fcnt; xfer += oprot->writeFieldBegin("partArchiveLevel", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32(this->partArchiveLevel); xfer += oprot->writeFieldEnd(); @@ -6688,32 +6950,40 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("RequestPartsSpec"); - xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter331; - for (_iter331 = this->names.begin(); _iter331 != this->names.end(); ++_iter331) + if (this->__isset.names) { + ++fcnt; + xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { - xfer += 
oprot->writeString((*_iter331)); + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); + std::vector ::const_iterator _iter331; + for (_iter331 = this->names.begin(); _iter331 != this->names.end(); ++_iter331) + { + xfer += oprot->writeString((*_iter331)); + } + xfer += oprot->writeListEnd(); } - xfer += oprot->writeListEnd(); + xfer += oprot->writeFieldEnd(); } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter332; - for (_iter332 = this->exprs.begin(); _iter332 != this->exprs.end(); ++_iter332) + if (this->__isset.exprs) { + ++fcnt; + xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { - xfer += (*_iter332).write(oprot); + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); + std::vector ::const_iterator _iter332; + for (_iter332 = this->exprs.begin(); _iter332 != this->exprs.end(); ++_iter332) + { + xfer += (*_iter332).write(oprot); + } + xfer += oprot->writeListEnd(); } - xfer += oprot->writeListEnd(); + xfer += oprot->writeFieldEnd(); + } + if (fcnt != 1) { + throw ::apache::thrift::TException("Union must have one set value."); } - xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6836,41 +7106,50 @@ uint32_t DropPartitionsRequest::read(::apache::thrift::protocol::TProtocol* ipro uint32_t DropPartitionsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("DropPartitionsRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_STRUCT, 3); xfer += this->parts.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.deleteData) { + ++fcnt; xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4); xfer += oprot->writeBool(this->deleteData); xfer += oprot->writeFieldEnd(); } if (this->__isset.ifExists) { + ++fcnt; xfer += oprot->writeFieldBegin("ifExists", ::apache::thrift::protocol::T_BOOL, 5); xfer += oprot->writeBool(this->ifExists); xfer += oprot->writeFieldEnd(); } if (this->__isset.ignoreProtection) { + ++fcnt; xfer += oprot->writeFieldBegin("ignoreProtection", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool(this->ignoreProtection); xfer += oprot->writeFieldEnd(); } if (this->__isset.environmentContext) { + ++fcnt; xfer += oprot->writeFieldBegin("environmentContext", ::apache::thrift::protocol::T_STRUCT, 7); xfer += this->environmentContext.write(oprot); xfer += oprot->writeFieldEnd(); } if (this->__isset.needResult) { + ++fcnt; xfer += oprot->writeFieldBegin("needResult", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->needResult); xfer += oprot->writeFieldEnd(); @@ -6948,12 +7227,15 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t ResourceUri::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + 
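
One practical consequence of the RequestPartsSpec rewrite above: the writer now enforces the union contract, so callers must flag exactly one branch as set before serializing, and the unset branch is no longer written out as an empty list the way the old unconditional writer did. A hypothetical caller-side sketch — the member and __isset names come from the hunk above, but the partition-name value is an arbitrary example:

// Populate exactly one branch of the union, then mark it set.
RequestPartsSpec spec;
spec.names.push_back("ds=2008-04-08/hr=11");
spec.__isset.names = true;   // leave __isset.exprs false

// spec.write(oprot) now serializes only the "names" list; with neither
// or both branches flagged it throws TException("Union must have one
// set value.") instead of silently emitting an ambiguous message.
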
uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ResourceUri"); + ++fcnt; xfer += oprot->writeFieldBegin("resourceType", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32((int32_t)this->resourceType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("uri", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->uri); xfer += oprot->writeFieldEnd(); @@ -7087,36 +7369,45 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Function"); + ++fcnt; xfer += oprot->writeFieldBegin("functionName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->functionName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("className", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->className); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("ownerName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->ownerName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("ownerType", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32((int32_t)this->ownerType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 6); xfer += oprot->writeI32(this->createTime); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("functionType", ::apache::thrift::protocol::T_I32, 7); xfer += oprot->writeI32((int32_t)this->functionType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourceUris.size())); @@ -7230,20 +7521,25 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TxnInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TxnInfo"); + ++fcnt; xfer += oprot->writeFieldBegin("id", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->id); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->state); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->user); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->hostname); xfer += oprot->writeFieldEnd(); @@ -7332,12 +7628,15 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("GetOpenTxnsInfoResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("txn_high_water_mark", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->txn_high_water_mark); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("open_txns", 
::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->open_txns.size())); @@ -7433,12 +7732,15 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("GetOpenTxnsResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("txn_high_water_mark", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->txn_high_water_mark); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); @@ -7532,16 +7834,20 @@ uint32_t OpenTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t OpenTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("OpenTxnRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("num_txns", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->num_txns); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->user); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->hostname); xfer += oprot->writeFieldEnd(); @@ -7618,8 +7924,10 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("OpenTxnsResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); @@ -7690,8 +7998,10 @@ uint32_t AbortTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t AbortTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("AbortTxnRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->txnid); xfer += oprot->writeFieldEnd(); @@ -7754,8 +8064,10 @@ uint32_t CommitTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t CommitTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("CommitTxnRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->txnid); xfer += oprot->writeFieldEnd(); @@ -7860,26 +8172,32 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t LockComponent::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("LockComponent"); + ++fcnt; xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32((int32_t)this->type); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("level", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->level); xfer += 
oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); if (this->__isset.tablename) { + ++fcnt; xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->tablename); xfer += oprot->writeFieldEnd(); } if (this->__isset.partitionname) { + ++fcnt; xfer += oprot->writeFieldBegin("partitionname", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->partitionname); xfer += oprot->writeFieldEnd(); @@ -7989,8 +8307,10 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("LockRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); @@ -8004,14 +8324,17 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldEnd(); if (this->__isset.txnid) { + ++fcnt; xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->txnid); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->user); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->hostname); xfer += oprot->writeFieldEnd(); @@ -8091,12 +8414,15 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t LockResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("LockResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lockid); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->state); xfer += oprot->writeFieldEnd(); @@ -8160,8 +8486,10 @@ uint32_t CheckLockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t CheckLockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("CheckLockRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lockid); xfer += oprot->writeFieldEnd(); @@ -8224,8 +8552,10 @@ uint32_t UnlockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t UnlockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("UnlockRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lockid); xfer += oprot->writeFieldEnd(); @@ -8272,6 +8602,7 @@ uint32_t ShowLocksRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t ShowLocksRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ShowLocksRequest"); xfer += oprot->writeFieldStop(); @@ -8435,52 
+8766,64 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i uint32_t ShowLocksResponseElement::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ShowLocksResponseElement"); + ++fcnt; xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lockid); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); if (this->__isset.tablename) { + ++fcnt; xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tablename); xfer += oprot->writeFieldEnd(); } if (this->__isset.partname) { + ++fcnt; xfer += oprot->writeFieldBegin("partname", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->partname); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32((int32_t)this->state); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 6); xfer += oprot->writeI32((int32_t)this->type); xfer += oprot->writeFieldEnd(); if (this->__isset.txnid) { + ++fcnt; xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 7); xfer += oprot->writeI64(this->txnid); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("lastheartbeat", ::apache::thrift::protocol::T_I64, 8); xfer += oprot->writeI64(this->lastheartbeat); xfer += oprot->writeFieldEnd(); if (this->__isset.acquiredat) { + ++fcnt; xfer += oprot->writeFieldBegin("acquiredat", ::apache::thrift::protocol::T_I64, 9); xfer += oprot->writeI64(this->acquiredat); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 10); xfer += oprot->writeString(this->user); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 11); xfer += oprot->writeString(this->hostname); xfer += oprot->writeFieldEnd(); @@ -8563,8 +8906,10 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ShowLocksResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); @@ -8641,14 +8986,17 @@ uint32_t HeartbeatRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t HeartbeatRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("HeartbeatRequest"); if (this->__isset.lockid) { + ++fcnt; xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lockid); xfer += oprot->writeFieldEnd(); } if (this->__isset.txnid) { + ++fcnt; xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->txnid); xfer += oprot->writeFieldEnd(); @@ -8724,12 +9072,15 @@ uint32_t HeartbeatTxnRangeRequest::read(::apache::thrift::protocol::TProtocol* i uint32_t 
HeartbeatTxnRangeRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("HeartbeatTxnRangeRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("min", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->min); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("max", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->max); xfer += oprot->writeFieldEnd(); @@ -8830,8 +9181,10 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("HeartbeatTxnRangeResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); @@ -8844,6 +9197,7 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); @@ -8955,26 +9309,32 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("CompactionRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tablename); xfer += oprot->writeFieldEnd(); if (this->__isset.partitionname) { + ++fcnt; xfer += oprot->writeFieldBegin("partitionname", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->partitionname); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((int32_t)this->type); xfer += oprot->writeFieldEnd(); if (this->__isset.runas) { + ++fcnt; xfer += oprot->writeFieldBegin("runas", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->runas); xfer += oprot->writeFieldEnd(); @@ -9026,6 +9386,7 @@ uint32_t ShowCompactRequest::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t ShowCompactRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ShowCompactRequest"); xfer += oprot->writeFieldStop(); @@ -9154,40 +9515,49 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* uint32_t ShowCompactResponseElement::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ShowCompactResponseElement"); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->tablename); xfer += oprot->writeFieldEnd(); if (this->__isset.partitionname) { + ++fcnt; xfer += 
oprot->writeFieldBegin("partitionname", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->partitionname); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32((int32_t)this->type); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->state); xfer += oprot->writeFieldEnd(); if (this->__isset.workerid) { + ++fcnt; xfer += oprot->writeFieldBegin("workerid", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->workerid); xfer += oprot->writeFieldEnd(); } if (this->__isset.start) { + ++fcnt; xfer += oprot->writeFieldBegin("start", ::apache::thrift::protocol::T_I64, 7); xfer += oprot->writeI64(this->start); xfer += oprot->writeFieldEnd(); } if (this->__isset.runAs) { + ++fcnt; xfer += oprot->writeFieldBegin("runAs", ::apache::thrift::protocol::T_STRING, 8); xfer += oprot->writeString(this->runAs); xfer += oprot->writeFieldEnd(); @@ -9270,8 +9640,10 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ShowCompactResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); @@ -9387,20 +9759,25 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("AddDynamicPartitions"); + ++fcnt; xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->txnid); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tablename); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); @@ -9482,13 +9859,16 @@ uint32_t NotificationEventRequest::read(::apache::thrift::protocol::TProtocol* i uint32_t NotificationEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("NotificationEventRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("lastEvent", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->lastEvent); xfer += oprot->writeFieldEnd(); if (this->__isset.maxEvents) { + ++fcnt; xfer += oprot->writeFieldBegin("maxEvents", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32(this->maxEvents); xfer += oprot->writeFieldEnd(); @@ -9602,30 +9982,37 @@ uint32_t NotificationEvent::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t NotificationEvent::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("NotificationEvent"); + ++fcnt; xfer += 
oprot->writeFieldBegin("eventId", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->eventId); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("eventTime", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32(this->eventTime); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("eventType", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->eventType); xfer += oprot->writeFieldEnd(); if (this->__isset.dbName) { + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); } if (this->__isset.tableName) { + ++fcnt; xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -9706,8 +10093,10 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("NotificationEventResponse"); + ++fcnt; xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); @@ -9778,8 +10167,10 @@ uint32_t CurrentNotificationEventId::read(::apache::thrift::protocol::TProtocol* uint32_t CurrentNotificationEventId::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("CurrentNotificationEventId"); + ++fcnt; xfer += oprot->writeFieldBegin("eventId", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->eventId); xfer += oprot->writeFieldEnd(); @@ -9854,8 +10245,10 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("InsertEventRequestData"); + ++fcnt; xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); @@ -9923,12 +10316,18 @@ uint32_t FireEventRequestData::read(::apache::thrift::protocol::TProtocol* iprot uint32_t FireEventRequestData::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("FireEventRequestData"); - xfer += oprot->writeFieldBegin("insertData", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->insertData.write(oprot); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.insertData) { + ++fcnt; + xfer += oprot->writeFieldBegin("insertData", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->insertData.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (fcnt != 1) { + throw ::apache::thrift::TException("Union must have one set value."); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -10035,27 +10434,33 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) 
@@ -10035,27 +10434,33 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("FireEventRequest"); + ++fcnt; xfer += oprot->writeFieldBegin("successful", ::apache::thrift::protocol::T_BOOL, 1); xfer += oprot->writeBool(this->successful); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("data", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->data.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.dbName) { + ++fcnt; xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); } if (this->__isset.tableName) { + ++fcnt; xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); } if (this->__isset.partitionVals) { + ++fcnt; xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionVals.size())); @@ -10115,6 +10520,7 @@ uint32_t FireEventResponse::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t FireEventResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("FireEventResponse"); xfer += oprot->writeFieldStop(); @@ -10173,8 +10579,10 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("MetaException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10235,8 +10643,10 @@ uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* ipro uint32_t UnknownTableException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("UnknownTableException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10297,8 +10707,10 @@ uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t UnknownDBException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("UnknownDBException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10359,8 +10771,10 @@ uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* ipr uint32_t AlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("AlreadyExistsException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10421,8 +10835,10 @@ uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* uint32_t InvalidPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("InvalidPartitionException"); + ++fcnt; xfer +=
oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10483,8 +10899,10 @@ uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* uint32_t UnknownPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("UnknownPartitionException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10545,8 +10963,10 @@ uint32_t InvalidObjectException::read(::apache::thrift::protocol::TProtocol* ipr uint32_t InvalidObjectException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("InvalidObjectException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10607,8 +11027,10 @@ uint32_t NoSuchObjectException::read(::apache::thrift::protocol::TProtocol* ipro uint32_t NoSuchObjectException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("NoSuchObjectException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10669,8 +11091,10 @@ uint32_t IndexAlreadyExistsException::read(::apache::thrift::protocol::TProtocol uint32_t IndexAlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("IndexAlreadyExistsException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10731,8 +11155,10 @@ uint32_t InvalidOperationException::read(::apache::thrift::protocol::TProtocol* uint32_t InvalidOperationException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("InvalidOperationException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10793,8 +11219,10 @@ uint32_t ConfigValSecurityException::read(::apache::thrift::protocol::TProtocol* uint32_t ConfigValSecurityException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ConfigValSecurityException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10855,8 +11283,10 @@ uint32_t InvalidInputException::read(::apache::thrift::protocol::TProtocol* ipro uint32_t InvalidInputException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("InvalidInputException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10917,8 +11347,10 @@ uint32_t NoSuchTxnException::read(::apache::thrift::protocol::TProtocol* iprot) 
uint32_t NoSuchTxnException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("NoSuchTxnException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -10979,8 +11411,10 @@ uint32_t TxnAbortedException::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t TxnAbortedException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TxnAbortedException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -11041,8 +11475,10 @@ uint32_t TxnOpenException::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TxnOpenException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TxnOpenException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); @@ -11103,8 +11539,10 @@ uint32_t NoSuchLockException::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t NoSuchLockException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("NoSuchLockException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 55e0385..a5408df 100644 --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -158,10 +158,12 @@ class Version { void __set_version(const std::string& val) { version = val; + __isset.version = true; } void __set_comments(const std::string& val) { comments = val; + __isset.comments = true; } bool operator == (const Version & rhs) const @@ -211,14 +213,17 @@ class FieldSchema { void __set_name(const std::string& val) { name = val; + __isset.name = true; } void __set_type(const std::string& val) { type = val; + __isset.type = true; } void __set_comment(const std::string& val) { comment = val; + __isset.comment = true; } bool operator == (const FieldSchema & rhs) const @@ -272,6 +277,7 @@ class Type { void __set_name(const std::string& val) { name = val; + __isset.name = true; } void __set_type1(const std::string& val) { @@ -350,22 +356,27 @@ class HiveObjectRef { void __set_objectType(const HiveObjectType::type val) { objectType = val; + __isset.objectType = true; } void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_objectName(const std::string& val) { objectName = val; + __isset.objectName = true; } void __set_partValues(const std::vector<std::string> & val) { partValues = val; + __isset.partValues = true; } void __set_columnName(const std::string& val) { columnName = val; + __isset.columnName = true; } bool operator == (const HiveObjectRef & rhs) const @@ -425,22 +436,27 @@ class PrivilegeGrantInfo { void __set_privilege(const std::string& val) { privilege = val; + __isset.privilege = true; } void __set_createTime(const int32_t val) { createTime = val; +
__isset.createTime = true; } void __set_grantor(const std::string& val) { grantor = val; + __isset.grantor = true; } void __set_grantorType(const PrincipalType::type val) { grantorType = val; + __isset.grantorType = true; } void __set_grantOption(const bool val) { grantOption = val; + __isset.grantOption = true; } bool operator == (const PrivilegeGrantInfo & rhs) const @@ -498,18 +514,22 @@ class HiveObjectPrivilege { void __set_hiveObject(const HiveObjectRef& val) { hiveObject = val; + __isset.hiveObject = true; } void __set_principalName(const std::string& val) { principalName = val; + __isset.principalName = true; } void __set_principalType(const PrincipalType::type val) { principalType = val; + __isset.principalType = true; } void __set_grantInfo(const PrivilegeGrantInfo& val) { grantInfo = val; + __isset.grantInfo = true; } bool operator == (const HiveObjectPrivilege & rhs) const @@ -559,6 +579,7 @@ class PrivilegeBag { void __set_privileges(const std::vector<HiveObjectPrivilege> & val) { privileges = val; + __isset.privileges = true; } bool operator == (const PrivilegeBag & rhs) const @@ -606,14 +627,17 @@ class PrincipalPrivilegeSet { void __set_userPrivileges(const std::map<std::string, std::vector<PrivilegeGrantInfo> > & val) { userPrivileges = val; + __isset.userPrivileges = true; } void __set_groupPrivileges(const std::map<std::string, std::vector<PrivilegeGrantInfo> > & val) { groupPrivileges = val; + __isset.groupPrivileges = true; } void __set_rolePrivileges(const std::map<std::string, std::vector<PrivilegeGrantInfo> > & val) { rolePrivileges = val; + __isset.rolePrivileges = true; } bool operator == (const PrincipalPrivilegeSet & rhs) const @@ -665,10 +689,12 @@ class GrantRevokePrivilegeRequest { void __set_requestType(const GrantRevokeType::type val) { requestType = val; + __isset.requestType = true; } void __set_privileges(const PrivilegeBag& val) { privileges = val; + __isset.privileges = true; } void __set_revokeGrantOption(const bool val) { @@ -773,14 +799,17 @@ class Role { void __set_roleName(const std::string& val) { roleName = val; + __isset.roleName = true; } void __set_createTime(const int32_t val) { createTime = val; + __isset.createTime = true; } void __set_ownerName(const std::string& val) { ownerName = val; + __isset.ownerName = true; } bool operator == (const Role & rhs) const @@ -840,30 +869,37 @@ class RolePrincipalGrant { void __set_roleName(const std::string& val) { roleName = val; + __isset.roleName = true; } void __set_principalName(const std::string& val) { principalName = val; + __isset.principalName = true; } void __set_principalType(const PrincipalType::type val) { principalType = val; + __isset.principalType = true; } void __set_grantOption(const bool val) { grantOption = val; + __isset.grantOption = true; } void __set_grantTime(const int32_t val) { grantTime = val; + __isset.grantTime = true; } void __set_grantorName(const std::string& val) { grantorName = val; + __isset.grantorName = true; } void __set_grantorPrincipalType(const PrincipalType::type val) { grantorPrincipalType = val; + __isset.grantorPrincipalType = true; } bool operator == (const RolePrincipalGrant & rhs) const @@ -1086,18 +1122,22 @@ class GrantRevokeRoleRequest { void __set_requestType(const GrantRevokeType::type val) { requestType = val; + __isset.requestType = true; } void __set_roleName(const std::string& val) { roleName = val; + __isset.roleName = true; } void __set_principalName(const std::string& val) { principalName = val; + __isset.principalName = true; } void __set_principalType(const PrincipalType::type val) { principalType = val; + __isset.principalType = true; } void __set_grantor(const std::string& val) {
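// NOTE: the header half of the patch changes every generated __set_* to flip its
// matching __isset bit, so "was this field ever assigned" is visible later to
// write() and operator==. The setter pattern, reduced to a minimal hypothetical
// sketch (RoleSketch is a trimmed mock, not the generated Role class):
#include <cassert>
#include <string>

class RoleSketch {
 public:
  void __set_roleName(const std::string& val) {
    roleName = val;
    __isset.roleName = true;       // the line this patch adds after each assignment
  }
  std::string roleName;
  struct { bool roleName = false; } __isset;
};

int main() {
  RoleSketch r;
  assert(!r.__isset.roleName);     // default-constructed: nothing set yet
  r.__set_roleName("admin");
  assert(r.__isset.roleName);      // setter recorded the field's presence
}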
@@ -1232,18 +1272,22 @@ class Database { void __set_name(const std::string& val) { name = val; + __isset.name = true; } void __set_description(const std::string& val) { description = val; + __isset.description = true; } void __set_locationUri(const std::string& val) { locationUri = val; + __isset.locationUri = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } void __set_privileges(const PrincipalPrivilegeSet& val) { @@ -1324,14 +1368,17 @@ class SerDeInfo { void __set_name(const std::string& val) { name = val; + __isset.name = true; } void __set_serializationLib(const std::string& val) { serializationLib = val; + __isset.serializationLib = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } bool operator == (const SerDeInfo & rhs) const @@ -1381,10 +1428,12 @@ class Order { void __set_col(const std::string& val) { col = val; + __isset.col = true; } void __set_order(const int32_t val) { order = val; + __isset.order = true; } bool operator == (const Order & rhs) const @@ -1434,14 +1483,17 @@ class SkewedInfo { void __set_skewedColNames(const std::vector<std::string> & val) { skewedColNames = val; + __isset.skewedColNames = true; } void __set_skewedColValues(const std::vector<std::vector<std::string> > & val) { skewedColValues = val; + __isset.skewedColValues = true; } void __set_skewedColValueLocationMaps(const std::map<std::vector<std::string>, std::string> & val) { skewedColValueLocationMaps = val; + __isset.skewedColValueLocationMaps = true; } bool operator == (const SkewedInfo & rhs) const @@ -1511,42 +1563,52 @@ class StorageDescriptor { void __set_cols(const std::vector<FieldSchema> & val) { cols = val; + __isset.cols = true; } void __set_location(const std::string& val) { location = val; + __isset.location = true; } void __set_inputFormat(const std::string& val) { inputFormat = val; + __isset.inputFormat = true; } void __set_outputFormat(const std::string& val) { outputFormat = val; + __isset.outputFormat = true; } void __set_compressed(const bool val) { compressed = val; + __isset.compressed = true; } void __set_numBuckets(const int32_t val) { numBuckets = val; + __isset.numBuckets = true; } void __set_serdeInfo(const SerDeInfo& val) { serdeInfo = val; + __isset.serdeInfo = true; } void __set_bucketCols(const std::vector<std::string> & val) { bucketCols = val; + __isset.bucketCols = true; } void __set_sortCols(const std::vector<Order> & val) { sortCols = val; + __isset.sortCols = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } void __set_skewedInfo(const SkewedInfo& val) { @@ -1652,50 +1714,62 @@ class Table { void __set_tableName(const std::string& val) { tableName = val; + __isset.tableName = true; } void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_owner(const std::string& val) { owner = val; + __isset.owner = true; } void __set_createTime(const int32_t val) { createTime = val; + __isset.createTime = true; } void __set_lastAccessTime(const int32_t val) { lastAccessTime = val; + __isset.lastAccessTime = true; } void __set_retention(const int32_t val) { retention = val; + __isset.retention = true; } void __set_sd(const StorageDescriptor& val) { sd = val; + __isset.sd = true; } void __set_partitionKeys(const std::vector<FieldSchema> & val) { partitionKeys = val; + __isset.partitionKeys = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } void __set_viewOriginalText(const std::string& val) { viewOriginalText = val; + __isset.viewOriginalText = true;
} void __set_viewExpandedText(const std::string& val) { viewExpandedText = val; + __isset.viewExpandedText = true; } void __set_tableType(const std::string& val) { tableType = val; + __isset.tableType = true; } void __set_privileges(const PrincipalPrivilegeSet& val) { @@ -1793,30 +1867,37 @@ class Partition { void __set_values(const std::vector<std::string> & val) { values = val; + __isset.values = true; } void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_tableName(const std::string& val) { tableName = val; + __isset.tableName = true; } void __set_createTime(const int32_t val) { createTime = val; + __isset.createTime = true; } void __set_lastAccessTime(const int32_t val) { lastAccessTime = val; + __isset.lastAccessTime = true; } void __set_sd(const StorageDescriptor& val) { sd = val; + __isset.sd = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } void __set_privileges(const PrincipalPrivilegeSet& val) { @@ -1891,22 +1972,27 @@ class PartitionWithoutSD { void __set_values(const std::vector<std::string> & val) { values = val; + __isset.values = true; } void __set_createTime(const int32_t val) { createTime = val; + __isset.createTime = true; } void __set_lastAccessTime(const int32_t val) { lastAccessTime = val; + __isset.lastAccessTime = true; } void __set_relativePath(const std::string& val) { relativePath = val; + __isset.relativePath = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } void __set_privileges(const PrincipalPrivilegeSet& val) { @@ -1969,10 +2055,12 @@ class PartitionSpecWithSharedSD { void __set_partitions(const std::vector<PartitionWithoutSD> & val) { partitions = val; + __isset.partitions = true; } void __set_sd(const StorageDescriptor& val) { sd = val; + __isset.sd = true; } bool operator == (const PartitionSpecWithSharedSD & rhs) const @@ -2018,6 +2106,7 @@ class PartitionListComposingSpec { void __set_partitions(const std::vector<Partition> & val) { partitions = val; + __isset.partitions = true; } bool operator == (const PartitionListComposingSpec & rhs) const @@ -2069,14 +2158,17 @@ class PartitionSpec { void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_tableName(const std::string& val) { tableName = val; + __isset.tableName = true; } void __set_rootPath(const std::string& val) { rootPath = val; + __isset.rootPath = true; } void __set_sharedSDPartitionSpec(const PartitionSpecWithSharedSD& val) { @@ -2160,42 +2252,52 @@ class Index { void __set_indexName(const std::string& val) { indexName = val; + __isset.indexName = true; } void __set_indexHandlerClass(const std::string& val) { indexHandlerClass = val; + __isset.indexHandlerClass = true; } void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_origTableName(const std::string& val) { origTableName = val; + __isset.origTableName = true; } void __set_createTime(const int32_t val) { createTime = val; + __isset.createTime = true; } void __set_lastAccessTime(const int32_t val) { lastAccessTime = val; + __isset.lastAccessTime = true; } void __set_indexTableName(const std::string& val) { indexTableName = val; + __isset.indexTableName = true; } void __set_sd(const StorageDescriptor& val) { sd = val; + __isset.sd = true; } void __set_parameters(const std::map<std::string, std::string> & val) { parameters = val; + __isset.parameters = true; } void __set_deferredRebuild(const bool val) { deferredRebuild = val; + __isset.deferredRebuild = true; } bool operator == (const Index &
rhs) const @@ -2684,41 +2786,65 @@ class ColumnStatisticsData { void __set_booleanStats(const BooleanColumnStatsData& val) { booleanStats = val; + __isset = _ColumnStatisticsData__isset(); + __isset.booleanStats = true; } void __set_longStats(const LongColumnStatsData& val) { longStats = val; + __isset = _ColumnStatisticsData__isset(); + __isset.longStats = true; } void __set_doubleStats(const DoubleColumnStatsData& val) { doubleStats = val; + __isset = _ColumnStatisticsData__isset(); + __isset.doubleStats = true; } void __set_stringStats(const StringColumnStatsData& val) { stringStats = val; + __isset = _ColumnStatisticsData__isset(); + __isset.stringStats = true; } void __set_binaryStats(const BinaryColumnStatsData& val) { binaryStats = val; + __isset = _ColumnStatisticsData__isset(); + __isset.binaryStats = true; } void __set_decimalStats(const DecimalColumnStatsData& val) { decimalStats = val; + __isset = _ColumnStatisticsData__isset(); + __isset.decimalStats = true; } bool operator == (const ColumnStatisticsData & rhs) const { - if (!(booleanStats == rhs.booleanStats)) + if (__isset.booleanStats != rhs.__isset.booleanStats) + return false; + else if (__isset.booleanStats && !(booleanStats == rhs.booleanStats)) + return false; + if (__isset.longStats != rhs.__isset.longStats) + return false; + else if (__isset.longStats && !(longStats == rhs.longStats)) + return false; + if (__isset.doubleStats != rhs.__isset.doubleStats) + return false; + else if (__isset.doubleStats && !(doubleStats == rhs.doubleStats)) return false; - if (!(longStats == rhs.longStats)) + if (__isset.stringStats != rhs.__isset.stringStats) return false; - if (!(doubleStats == rhs.doubleStats)) + else if (__isset.stringStats && !(stringStats == rhs.stringStats)) return false; - if (!(stringStats == rhs.stringStats)) + if (__isset.binaryStats != rhs.__isset.binaryStats) return false; - if (!(binaryStats == rhs.binaryStats)) + else if (__isset.binaryStats && !(binaryStats == rhs.binaryStats)) return false; - if (!(decimalStats == rhs.decimalStats)) + if (__isset.decimalStats != rhs.__isset.decimalStats) + return false; + else if (__isset.decimalStats && !(decimalStats == rhs.decimalStats)) return false; return true; } @@ -3013,10 +3139,12 @@ class Schema { void __set_fieldSchemas(const std::vector<FieldSchema> & val) { fieldSchemas = val; + __isset.fieldSchemas = true; } void __set_properties(const std::map<std::string, std::string> & val) { properties = val; + __isset.properties = true; } bool operator == (const Schema & rhs) const @@ -3062,6 +3190,7 @@ class EnvironmentContext { void __set_properties(const std::map<std::string, std::string> & val) { properties = val; + __isset.properties = true; } bool operator == (const EnvironmentContext & rhs) const @@ -3631,17 +3760,25 @@ class RequestPartsSpec { void __set_names(const std::vector<std::string> & val) { names = val; + __isset = _RequestPartsSpec__isset(); + __isset.names = true; } void __set_exprs(const std::vector<DropPartitionsExpr> & val) { exprs = val; + __isset = _RequestPartsSpec__isset(); + __isset.exprs = true; } bool operator == (const RequestPartsSpec & rhs) const { - if (!(names == rhs.names)) + if (__isset.names != rhs.__isset.names) + return false; + else if (__isset.names && !(names == rhs.names)) return false; - if (!(exprs == rhs.exprs)) + if (__isset.exprs != rhs.__isset.exprs) + return false; + else if (__isset.exprs && !(exprs == rhs.exprs)) return false; return true; }
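// NOTE: for the union-like types (ColumnStatisticsData, RequestPartsSpec,
// FireEventRequestData) the setters now reset the whole __isset record before
// marking the new member, and operator== compares the __isset flags before the
// values, so leftovers in inactive members no longer decide equality. A sketch of
// that comparison shape, with simplified field types standing in for the real ones:
#include <cassert>
#include <string>
#include <vector>

struct RequestPartsSpecSketch {
  std::vector<std::string> names;
  std::vector<std::string> exprs;    // stand-in; the real member holds DropPartitionsExpr
  struct { bool names = false; bool exprs = false; } __isset;

  bool operator==(const RequestPartsSpecSketch& rhs) const {
    if (__isset.names != rhs.__isset.names)
      return false;                  // different active member: not equal
    else if (__isset.names && !(names == rhs.names))
      return false;                  // same member active: compare its value
    if (__isset.exprs != rhs.__isset.exprs)
      return false;
    else if (__isset.exprs && !(exprs == rhs.exprs))
      return false;
    return true;
  }
};

int main() {
  RequestPartsSpecSketch a, b;
  a.__isset.names = true;            // a carries names, b carries nothing
  assert(!(a == b));                 // unequal even though both vectors are empty
  b.__isset.names = true;
  assert(a == b);                    // same member set, same (empty) value
}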
@@ -3793,10 +3930,12 @@ class ResourceUri { void __set_resourceType(const ResourceType::type val) { resourceType = val; + __isset.resourceType = true; } void __set_uri(const std::string& val) { uri = val; + __isset.uri = true; } bool operator == (const ResourceUri & rhs) const @@ -3856,34 +3995,42 @@ class Function { void __set_functionName(const std::string& val) { functionName = val; + __isset.functionName = true; } void __set_dbName(const std::string& val) { dbName = val; + __isset.dbName = true; } void __set_className(const std::string& val) { className = val; + __isset.className = true; } void __set_ownerName(const std::string& val) { ownerName = val; + __isset.ownerName = true; } void __set_ownerType(const PrincipalType::type val) { ownerType = val; + __isset.ownerType = true; } void __set_createTime(const int32_t val) { createTime = val; + __isset.createTime = true; } void __set_functionType(const FunctionType::type val) { functionType = val; + __isset.functionType = true; } void __set_resourceUris(const std::vector<ResourceUri> & val) { resourceUris = val; + __isset.resourceUris = true; } bool operator == (const Function & rhs) const @@ -4670,6 +4817,7 @@ class ShowLocksResponse { void __set_locks(const std::vector<ShowLocksResponseElement> & val) { locks = val; + __isset.locks = true; } bool operator == (const ShowLocksResponse & rhs) const @@ -5417,11 +5565,15 @@ class FireEventRequestData { void __set_insertData(const InsertEventRequestData& val) { insertData = val; + __isset = _FireEventRequestData__isset(); + __isset.insertData = true; } bool operator == (const FireEventRequestData & rhs) const { - if (!(insertData == rhs.insertData)) + if (__isset.insertData != rhs.__isset.insertData) + return false; + else if (__isset.insertData && !(insertData == rhs.insertData)) return false; return true; } @@ -5572,6 +5724,7 @@ class MetaException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const MetaException & rhs) const @@ -5615,6 +5768,7 @@ class UnknownTableException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const UnknownTableException & rhs) const @@ -5658,6 +5812,7 @@ class UnknownDBException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const UnknownDBException & rhs) const @@ -5701,6 +5856,7 @@ class AlreadyExistsException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const AlreadyExistsException & rhs) const @@ -5744,6 +5900,7 @@ class InvalidPartitionException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const InvalidPartitionException & rhs) const @@ -5787,6 +5944,7 @@ class UnknownPartitionException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const UnknownPartitionException & rhs) const @@ -5830,6 +5988,7 @@ class InvalidObjectException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const InvalidObjectException & rhs) const @@ -5873,6 +6032,7 @@ class NoSuchObjectException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const NoSuchObjectException & rhs) const @@ -5916,6 +6076,7 @@ class
IndexAlreadyExistsException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const IndexAlreadyExistsException & rhs) const @@ -5959,6 +6120,7 @@ class InvalidOperationException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const InvalidOperationException & rhs) const @@ -6002,6 +6164,7 @@ class ConfigValSecurityException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const ConfigValSecurityException & rhs) const @@ -6045,6 +6208,7 @@ class InvalidInputException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const InvalidInputException & rhs) const @@ -6088,6 +6252,7 @@ class NoSuchTxnException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const NoSuchTxnException & rhs) const @@ -6131,6 +6296,7 @@ class TxnAbortedException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const TxnAbortedException & rhs) const @@ -6174,6 +6340,7 @@ class TxnOpenException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const TxnOpenException & rhs) const @@ -6217,6 +6384,7 @@ class NoSuchLockException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } bool operator == (const NoSuchLockException & rhs) const diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote old mode 100644 new mode 100755 diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 4cc54e8..19de09d 100644 --- metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -5585,7 +5585,7 @@ def process_getMetaConf(self, seqid, iprot, oprot): result = getMetaConf_result() try: result.success = self._handler.getMetaConf(args.key) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("getMetaConf", TMessageType.REPLY, seqid) result.write(oprot) @@ -5599,7 +5599,7 @@ def process_setMetaConf(self, seqid, iprot, oprot): result = setMetaConf_result() try: self._handler.setMetaConf(args.key, args.value) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("setMetaConf", TMessageType.REPLY, seqid) result.write(oprot) @@ -5613,11 +5613,11 @@ def process_create_database(self, seqid, iprot, oprot): result = create_database_result() try: self._handler.create_database(args.database) - except AlreadyExistsException as o1: + except AlreadyExistsException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("create_database", TMessageType.REPLY, seqid) result.write(oprot) @@ -5631,9 
+5631,9 @@ def process_get_database(self, seqid, iprot, oprot): result = get_database_result() try: result.success = self._handler.get_database(args.name) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_database", TMessageType.REPLY, seqid) result.write(oprot) @@ -5647,11 +5647,11 @@ def process_drop_database(self, seqid, iprot, oprot): result = drop_database_result() try: self._handler.drop_database(args.name, args.deleteData, args.cascade) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except InvalidOperationException as o2: + except InvalidOperationException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("drop_database", TMessageType.REPLY, seqid) result.write(oprot) @@ -5665,7 +5665,7 @@ def process_get_databases(self, seqid, iprot, oprot): result = get_databases_result() try: result.success = self._handler.get_databases(args.pattern) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_databases", TMessageType.REPLY, seqid) result.write(oprot) @@ -5679,7 +5679,7 @@ def process_get_all_databases(self, seqid, iprot, oprot): result = get_all_databases_result() try: result.success = self._handler.get_all_databases() - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_all_databases", TMessageType.REPLY, seqid) result.write(oprot) @@ -5693,9 +5693,9 @@ def process_alter_database(self, seqid, iprot, oprot): result = alter_database_result() try: self._handler.alter_database(args.dbname, args.db) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_database", TMessageType.REPLY, seqid) result.write(oprot) @@ -5709,9 +5709,9 @@ def process_get_type(self, seqid, iprot, oprot): result = get_type_result() try: result.success = self._handler.get_type(args.name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -5725,11 +5725,11 @@ def process_create_type(self, seqid, iprot, oprot): result = create_type_result() try: result.success = self._handler.create_type(args.type) - except AlreadyExistsException as o1: + except AlreadyExistsException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("create_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -5743,9 +5743,9 @@ def process_drop_type(self, seqid, iprot, oprot): result = drop_type_result() try: result.success = self._handler.drop_type(args.type) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_type", TMessageType.REPLY, seqid) result.write(oprot) @@ -5759,7 +5759,7 @@ def process_get_type_all(self, seqid, iprot, oprot): result = get_type_all_result() try: result.success = self._handler.get_type_all(args.name) - except MetaException as o2: + except MetaException, o2: 
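# NOTE: every hunk in this regenerated Python module makes the same swap:
# "except MetaException as o1:" becomes "except MetaException, o1:". Both
# spellings bind the caught exception under Python 2, but the comma form is a
# syntax error on Python 3, so the regenerated module is Python-2-only. A small
# illustration of the handler shape (hypothetical names, Python 2 syntax):
class MetaException(Exception):
    pass

def process_call(handler):
    result = {}
    try:
        result["success"] = handler()
    except MetaException, o1:   # the py2-only spelling this generator emits
        result["o1"] = o1       # "except MetaException as o1:" also parses on py3
    return result

def boom():
    raise MetaException("no such table")

print process_call(boom)        # binds the exception under result["o1"]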
result.o2 = o2 oprot.writeMessageBegin("get_type_all", TMessageType.REPLY, seqid) result.write(oprot) @@ -5773,11 +5773,11 @@ def process_get_fields(self, seqid, iprot, oprot): result = get_fields_result() try: result.success = self._handler.get_fields(args.db_name, args.table_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except UnknownTableException as o2: + except UnknownTableException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 oprot.writeMessageBegin("get_fields", TMessageType.REPLY, seqid) result.write(oprot) @@ -5791,11 +5791,11 @@ def process_get_fields_with_environment_context(self, seqid, iprot, oprot): result = get_fields_with_environment_context_result() try: result.success = self._handler.get_fields_with_environment_context(args.db_name, args.table_name, args.environment_context) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except UnknownTableException as o2: + except UnknownTableException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 oprot.writeMessageBegin("get_fields_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -5809,11 +5809,11 @@ def process_get_schema(self, seqid, iprot, oprot): result = get_schema_result() try: result.success = self._handler.get_schema(args.db_name, args.table_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except UnknownTableException as o2: + except UnknownTableException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 oprot.writeMessageBegin("get_schema", TMessageType.REPLY, seqid) result.write(oprot) @@ -5827,11 +5827,11 @@ def process_get_schema_with_environment_context(self, seqid, iprot, oprot): result = get_schema_with_environment_context_result() try: result.success = self._handler.get_schema_with_environment_context(args.db_name, args.table_name, args.environment_context) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except UnknownTableException as o2: + except UnknownTableException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 oprot.writeMessageBegin("get_schema_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -5845,13 +5845,13 @@ def process_create_table(self, seqid, iprot, oprot): result = create_table_result() try: self._handler.create_table(args.tbl) - except AlreadyExistsException as o1: + except AlreadyExistsException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 - except NoSuchObjectException as o4: + except NoSuchObjectException, o4: result.o4 = o4 oprot.writeMessageBegin("create_table", TMessageType.REPLY, seqid) result.write(oprot) @@ -5865,13 +5865,13 @@ def process_create_table_with_environment_context(self, seqid, iprot, oprot): result = create_table_with_environment_context_result() try: self._handler.create_table_with_environment_context(args.tbl, args.environment_context) - except AlreadyExistsException as o1: + except AlreadyExistsException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 - except NoSuchObjectException as o4: + except 
NoSuchObjectException, o4: result.o4 = o4 oprot.writeMessageBegin("create_table_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -5885,9 +5885,9 @@ def process_drop_table(self, seqid, iprot, oprot): result = drop_table_result() try: self._handler.drop_table(args.dbname, args.name, args.deleteData) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("drop_table", TMessageType.REPLY, seqid) result.write(oprot) @@ -5901,9 +5901,9 @@ def process_drop_table_with_environment_context(self, seqid, iprot, oprot): result = drop_table_with_environment_context_result() try: self._handler.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("drop_table_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -5917,7 +5917,7 @@ def process_get_tables(self, seqid, iprot, oprot): result = get_tables_result() try: result.success = self._handler.get_tables(args.db_name, args.pattern) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_tables", TMessageType.REPLY, seqid) result.write(oprot) @@ -5931,7 +5931,7 @@ def process_get_all_tables(self, seqid, iprot, oprot): result = get_all_tables_result() try: result.success = self._handler.get_all_tables(args.db_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_all_tables", TMessageType.REPLY, seqid) result.write(oprot) @@ -5945,9 +5945,9 @@ def process_get_table(self, seqid, iprot, oprot): result = get_table_result() try: result.success = self._handler.get_table(args.dbname, args.tbl_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_table", TMessageType.REPLY, seqid) result.write(oprot) @@ -5961,11 +5961,11 @@ def process_get_table_objects_by_name(self, seqid, iprot, oprot): result = get_table_objects_by_name_result() try: result.success = self._handler.get_table_objects_by_name(args.dbname, args.tbl_names) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except InvalidOperationException as o2: + except InvalidOperationException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 oprot.writeMessageBegin("get_table_objects_by_name", TMessageType.REPLY, seqid) result.write(oprot) @@ -5979,11 +5979,11 @@ def process_get_table_names_by_filter(self, seqid, iprot, oprot): result = get_table_names_by_filter_result() try: result.success = self._handler.get_table_names_by_filter(args.dbname, args.filter, args.max_tables) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except InvalidOperationException as o2: + except InvalidOperationException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 oprot.writeMessageBegin("get_table_names_by_filter", TMessageType.REPLY, seqid) result.write(oprot) @@ -5997,9 +5997,9 @@ def process_alter_table(self, seqid, iprot, oprot): result = alter_table_result() try: self._handler.alter_table(args.dbname, args.tbl_name, 
args.new_tbl) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_table", TMessageType.REPLY, seqid) result.write(oprot) @@ -6013,9 +6013,9 @@ def process_alter_table_with_environment_context(self, seqid, iprot, oprot): result = alter_table_with_environment_context_result() try: self._handler.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_table_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6029,9 +6029,9 @@ def process_alter_table_with_cascade(self, seqid, iprot, oprot): result = alter_table_with_cascade_result() try: self._handler.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_table_with_cascade", TMessageType.REPLY, seqid) result.write(oprot) @@ -6045,11 +6045,11 @@ def process_add_partition(self, seqid, iprot, oprot): result = add_partition_result() try: result.success = self._handler.add_partition(args.new_part) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("add_partition", TMessageType.REPLY, seqid) result.write(oprot) @@ -6063,11 +6063,11 @@ def process_add_partition_with_environment_context(self, seqid, iprot, oprot): result = add_partition_with_environment_context_result() try: result.success = self._handler.add_partition_with_environment_context(args.new_part, args.environment_context) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("add_partition_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6081,11 +6081,11 @@ def process_add_partitions(self, seqid, iprot, oprot): result = add_partitions_result() try: result.success = self._handler.add_partitions(args.new_parts) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("add_partitions", TMessageType.REPLY, seqid) result.write(oprot) @@ -6099,11 +6099,11 @@ def process_add_partitions_pspec(self, seqid, iprot, oprot): result = add_partitions_pspec_result() try: result.success = self._handler.add_partitions_pspec(args.new_parts) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("add_partitions_pspec", TMessageType.REPLY, seqid) 
result.write(oprot) @@ -6117,11 +6117,11 @@ def process_append_partition(self, seqid, iprot, oprot): result = append_partition_result() try: result.success = self._handler.append_partition(args.db_name, args.tbl_name, args.part_vals) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("append_partition", TMessageType.REPLY, seqid) result.write(oprot) @@ -6135,11 +6135,11 @@ def process_add_partitions_req(self, seqid, iprot, oprot): result = add_partitions_req_result() try: result.success = self._handler.add_partitions_req(args.request) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("add_partitions_req", TMessageType.REPLY, seqid) result.write(oprot) @@ -6153,11 +6153,11 @@ def process_append_partition_with_environment_context(self, seqid, iprot, oprot) result = append_partition_with_environment_context_result() try: result.success = self._handler.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("append_partition_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6171,11 +6171,11 @@ def process_append_partition_by_name(self, seqid, iprot, oprot): result = append_partition_by_name_result() try: result.success = self._handler.append_partition_by_name(args.db_name, args.tbl_name, args.part_name) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("append_partition_by_name", TMessageType.REPLY, seqid) result.write(oprot) @@ -6189,11 +6189,11 @@ def process_append_partition_by_name_with_environment_context(self, seqid, iprot result = append_partition_by_name_with_environment_context_result() try: result.success = self._handler.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("append_partition_by_name_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6207,9 +6207,9 @@ def process_drop_partition(self, seqid, iprot, oprot): result = drop_partition_result() try: result.success = self._handler.drop_partition(args.db_name, args.tbl_name, args.part_vals, args.deleteData) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_partition", TMessageType.REPLY, 
seqid) result.write(oprot) @@ -6223,9 +6223,9 @@ def process_drop_partition_with_environment_context(self, seqid, iprot, oprot): result = drop_partition_with_environment_context_result() try: result.success = self._handler.drop_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.deleteData, args.environment_context) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_partition_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6239,9 +6239,9 @@ def process_drop_partition_by_name(self, seqid, iprot, oprot): result = drop_partition_by_name_result() try: result.success = self._handler.drop_partition_by_name(args.db_name, args.tbl_name, args.part_name, args.deleteData) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_partition_by_name", TMessageType.REPLY, seqid) result.write(oprot) @@ -6255,9 +6255,9 @@ def process_drop_partition_by_name_with_environment_context(self, seqid, iprot, result = drop_partition_by_name_with_environment_context_result() try: result.success = self._handler.drop_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.deleteData, args.environment_context) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_partition_by_name_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6271,9 +6271,9 @@ def process_drop_partitions_req(self, seqid, iprot, oprot): result = drop_partitions_req_result() try: result.success = self._handler.drop_partitions_req(args.req) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_partitions_req", TMessageType.REPLY, seqid) result.write(oprot) @@ -6287,9 +6287,9 @@ def process_get_partition(self, seqid, iprot, oprot): result = get_partition_result() try: result.success = self._handler.get_partition(args.db_name, args.tbl_name, args.part_vals) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partition", TMessageType.REPLY, seqid) result.write(oprot) @@ -6303,13 +6303,13 @@ def process_exchange_partition(self, seqid, iprot, oprot): result = exchange_partition_result() try: result.success = self._handler.exchange_partition(args.partitionSpecs, args.source_db, args.source_table_name, args.dest_db, args.dest_table_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 - except InvalidObjectException as o3: + except InvalidObjectException, o3: result.o3 = o3 - except InvalidInputException as o4: + except InvalidInputException, o4: result.o4 = o4 oprot.writeMessageBegin("exchange_partition", TMessageType.REPLY, seqid) result.write(oprot) @@ -6323,9 +6323,9 @@ def process_get_partition_with_auth(self, seqid, iprot, oprot): result = get_partition_with_auth_result() try: result.success = 
self._handler.get_partition_with_auth(args.db_name, args.tbl_name, args.part_vals, args.user_name, args.group_names) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partition_with_auth", TMessageType.REPLY, seqid) result.write(oprot) @@ -6339,9 +6339,9 @@ def process_get_partition_by_name(self, seqid, iprot, oprot): result = get_partition_by_name_result() try: result.success = self._handler.get_partition_by_name(args.db_name, args.tbl_name, args.part_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partition_by_name", TMessageType.REPLY, seqid) result.write(oprot) @@ -6355,9 +6355,9 @@ def process_get_partitions(self, seqid, iprot, oprot): result = get_partitions_result() try: result.success = self._handler.get_partitions(args.db_name, args.tbl_name, args.max_parts) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions", TMessageType.REPLY, seqid) result.write(oprot) @@ -6371,9 +6371,9 @@ def process_get_partitions_with_auth(self, seqid, iprot, oprot): result = get_partitions_with_auth_result() try: result.success = self._handler.get_partitions_with_auth(args.db_name, args.tbl_name, args.max_parts, args.user_name, args.group_names) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_with_auth", TMessageType.REPLY, seqid) result.write(oprot) @@ -6387,9 +6387,9 @@ def process_get_partitions_pspec(self, seqid, iprot, oprot): result = get_partitions_pspec_result() try: result.success = self._handler.get_partitions_pspec(args.db_name, args.tbl_name, args.max_parts) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_pspec", TMessageType.REPLY, seqid) result.write(oprot) @@ -6403,7 +6403,7 @@ def process_get_partition_names(self, seqid, iprot, oprot): result = get_partition_names_result() try: result.success = self._handler.get_partition_names(args.db_name, args.tbl_name, args.max_parts) - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partition_names", TMessageType.REPLY, seqid) result.write(oprot) @@ -6417,9 +6417,9 @@ def process_get_partitions_ps(self, seqid, iprot, oprot): result = get_partitions_ps_result() try: result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_ps", TMessageType.REPLY, seqid) result.write(oprot) @@ -6433,9 +6433,9 @@ def process_get_partitions_ps_with_auth(self, seqid, iprot, oprot): result = get_partitions_ps_with_auth_result() try: result.success = self._handler.get_partitions_ps_with_auth(args.db_name, args.tbl_name, args.part_vals, args.max_parts, args.user_name, args.group_names) - except NoSuchObjectException as o1: + except 
NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_ps_with_auth", TMessageType.REPLY, seqid) result.write(oprot) @@ -6449,9 +6449,9 @@ def process_get_partition_names_ps(self, seqid, iprot, oprot): result = get_partition_names_ps_result() try: result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partition_names_ps", TMessageType.REPLY, seqid) result.write(oprot) @@ -6465,9 +6465,9 @@ def process_get_partitions_by_filter(self, seqid, iprot, oprot): result = get_partitions_by_filter_result() try: result.success = self._handler.get_partitions_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_by_filter", TMessageType.REPLY, seqid) result.write(oprot) @@ -6481,9 +6481,9 @@ def process_get_part_specs_by_filter(self, seqid, iprot, oprot): result = get_part_specs_by_filter_result() try: result.success = self._handler.get_part_specs_by_filter(args.db_name, args.tbl_name, args.filter, args.max_parts) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_part_specs_by_filter", TMessageType.REPLY, seqid) result.write(oprot) @@ -6497,9 +6497,9 @@ def process_get_partitions_by_expr(self, seqid, iprot, oprot): result = get_partitions_by_expr_result() try: result.success = self._handler.get_partitions_by_expr(args.req) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_by_expr", TMessageType.REPLY, seqid) result.write(oprot) @@ -6513,9 +6513,9 @@ def process_get_partitions_by_names(self, seqid, iprot, oprot): result = get_partitions_by_names_result() try: result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_by_names", TMessageType.REPLY, seqid) result.write(oprot) @@ -6529,9 +6529,9 @@ def process_alter_partition(self, seqid, iprot, oprot): result = alter_partition_result() try: self._handler.alter_partition(args.db_name, args.tbl_name, args.new_part) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_partition", TMessageType.REPLY, seqid) result.write(oprot) @@ -6545,9 +6545,9 @@ def process_alter_partitions(self, seqid, iprot, oprot): result = alter_partitions_result() try: self._handler.alter_partitions(args.db_name, args.tbl_name, args.new_parts) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_partitions", 
TMessageType.REPLY, seqid) result.write(oprot) @@ -6561,9 +6561,9 @@ def process_alter_partition_with_environment_context(self, seqid, iprot, oprot): result = alter_partition_with_environment_context_result() try: self._handler.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_partition_with_environment_context", TMessageType.REPLY, seqid) result.write(oprot) @@ -6577,9 +6577,9 @@ def process_rename_partition(self, seqid, iprot, oprot): result = rename_partition_result() try: self._handler.rename_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("rename_partition", TMessageType.REPLY, seqid) result.write(oprot) @@ -6593,7 +6593,7 @@ def process_partition_name_has_valid_characters(self, seqid, iprot, oprot): result = partition_name_has_valid_characters_result() try: result.success = self._handler.partition_name_has_valid_characters(args.part_vals, args.throw_exception) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("partition_name_has_valid_characters", TMessageType.REPLY, seqid) result.write(oprot) @@ -6607,7 +6607,7 @@ def process_get_config_value(self, seqid, iprot, oprot): result = get_config_value_result() try: result.success = self._handler.get_config_value(args.name, args.defaultValue) - except ConfigValSecurityException as o1: + except ConfigValSecurityException, o1: result.o1 = o1 oprot.writeMessageBegin("get_config_value", TMessageType.REPLY, seqid) result.write(oprot) @@ -6621,7 +6621,7 @@ def process_partition_name_to_vals(self, seqid, iprot, oprot): result = partition_name_to_vals_result() try: result.success = self._handler.partition_name_to_vals(args.part_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("partition_name_to_vals", TMessageType.REPLY, seqid) result.write(oprot) @@ -6635,7 +6635,7 @@ def process_partition_name_to_spec(self, seqid, iprot, oprot): result = partition_name_to_spec_result() try: result.success = self._handler.partition_name_to_spec(args.part_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("partition_name_to_spec", TMessageType.REPLY, seqid) result.write(oprot) @@ -6649,17 +6649,17 @@ def process_markPartitionForEvent(self, seqid, iprot, oprot): result = markPartitionForEvent_result() try: self._handler.markPartitionForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 - except UnknownTableException as o4: + except UnknownTableException, o4: result.o4 = o4 - except UnknownPartitionException as o5: + except UnknownPartitionException, o5: result.o5 = o5 - except InvalidPartitionException as o6: + except InvalidPartitionException, o6: result.o6 = o6 oprot.writeMessageBegin("markPartitionForEvent", TMessageType.REPLY, seqid) result.write(oprot) @@ -6673,17 +6673,17 @@ def 
process_isPartitionMarkedForEvent(self, seqid, iprot, oprot): result = isPartitionMarkedForEvent_result() try: result.success = self._handler.isPartitionMarkedForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 - except UnknownDBException as o3: + except UnknownDBException, o3: result.o3 = o3 - except UnknownTableException as o4: + except UnknownTableException, o4: result.o4 = o4 - except UnknownPartitionException as o5: + except UnknownPartitionException, o5: result.o5 = o5 - except InvalidPartitionException as o6: + except InvalidPartitionException, o6: result.o6 = o6 oprot.writeMessageBegin("isPartitionMarkedForEvent", TMessageType.REPLY, seqid) result.write(oprot) @@ -6697,11 +6697,11 @@ def process_add_index(self, seqid, iprot, oprot): result = add_index_result() try: result.success = self._handler.add_index(args.new_index, args.index_table) - except InvalidObjectException as o1: + except InvalidObjectException, o1: result.o1 = o1 - except AlreadyExistsException as o2: + except AlreadyExistsException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("add_index", TMessageType.REPLY, seqid) result.write(oprot) @@ -6715,9 +6715,9 @@ def process_alter_index(self, seqid, iprot, oprot): result = alter_index_result() try: self._handler.alter_index(args.dbname, args.base_tbl_name, args.idx_name, args.new_idx) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_index", TMessageType.REPLY, seqid) result.write(oprot) @@ -6731,9 +6731,9 @@ def process_drop_index_by_name(self, seqid, iprot, oprot): result = drop_index_by_name_result() try: result.success = self._handler.drop_index_by_name(args.db_name, args.tbl_name, args.index_name, args.deleteData) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("drop_index_by_name", TMessageType.REPLY, seqid) result.write(oprot) @@ -6747,9 +6747,9 @@ def process_get_index_by_name(self, seqid, iprot, oprot): result = get_index_by_name_result() try: result.success = self._handler.get_index_by_name(args.db_name, args.tbl_name, args.index_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_index_by_name", TMessageType.REPLY, seqid) result.write(oprot) @@ -6763,9 +6763,9 @@ def process_get_indexes(self, seqid, iprot, oprot): result = get_indexes_result() try: result.success = self._handler.get_indexes(args.db_name, args.tbl_name, args.max_indexes) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_indexes", TMessageType.REPLY, seqid) result.write(oprot) @@ -6779,7 +6779,7 @@ def process_get_index_names(self, seqid, iprot, oprot): result = get_index_names_result() try: result.success = self._handler.get_index_names(args.db_name, args.tbl_name, args.max_indexes) - except MetaException as o2: + except MetaException, o2: result.o2 = o2 
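These regenerated Python handlers all follow one template: each exception type the Thrift IDL declares for a call is caught and assigned to a numbered field of the call's result struct, which is then written back as the REPLY, so failures travel to the client as data rather than as a broken connection. (Note that the regeneration also switches to the `except E, e` spelling, which is accepted by Python 2 only.) A self-contained Java sketch of the same pattern, with simplified stand-in types rather than Hive's generated classes:

```java
// Self-contained sketch of the processor pattern above; the types are
// simplified stand-ins, not Hive's generated Thrift classes.
public class ProcessorSketch {
  static class MetaException extends Exception {}
  static class NoSuchObjectException extends Exception {}

  /** Result struct: one success slot plus one slot per declared exception. */
  static class GetPartitionResult {
    Object success;             // wire field 0
    MetaException o1;           // wire field 1
    NoSuchObjectException o2;   // wire field 2
  }

  interface Handler {
    Object getPartition(String db, String tbl) throws MetaException, NoSuchObjectException;
  }

  private final Handler handler;

  public ProcessorSketch(Handler handler) {
    this.handler = handler;
  }

  /** Mirrors process_get_partition above: declared exceptions become result fields. */
  GetPartitionResult processGetPartition(String db, String tbl) {
    GetPartitionResult result = new GetPartitionResult();
    try {
      result.success = handler.getPartition(db, tbl);
    } catch (MetaException o1) {
      result.o1 = o1;           // reported back to the client as data, not rethrown
    } catch (NoSuchObjectException o2) {
      result.o2 = o2;
    }
    return result;              // the generated code then writes this struct as the REPLY
  }
}
```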
oprot.writeMessageBegin("get_index_names", TMessageType.REPLY, seqid) result.write(oprot) @@ -6793,13 +6793,13 @@ def process_update_table_column_statistics(self, seqid, iprot, oprot): result = update_table_column_statistics_result() try: result.success = self._handler.update_table_column_statistics(args.stats_obj) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 - except InvalidInputException as o4: + except InvalidInputException, o4: result.o4 = o4 oprot.writeMessageBegin("update_table_column_statistics", TMessageType.REPLY, seqid) result.write(oprot) @@ -6813,13 +6813,13 @@ def process_update_partition_column_statistics(self, seqid, iprot, oprot): result = update_partition_column_statistics_result() try: result.success = self._handler.update_partition_column_statistics(args.stats_obj) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 - except InvalidInputException as o4: + except InvalidInputException, o4: result.o4 = o4 oprot.writeMessageBegin("update_partition_column_statistics", TMessageType.REPLY, seqid) result.write(oprot) @@ -6833,13 +6833,13 @@ def process_get_table_column_statistics(self, seqid, iprot, oprot): result = get_table_column_statistics_result() try: result.success = self._handler.get_table_column_statistics(args.db_name, args.tbl_name, args.col_name) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 - except InvalidInputException as o3: + except InvalidInputException, o3: result.o3 = o3 - except InvalidObjectException as o4: + except InvalidObjectException, o4: result.o4 = o4 oprot.writeMessageBegin("get_table_column_statistics", TMessageType.REPLY, seqid) result.write(oprot) @@ -6853,13 +6853,13 @@ def process_get_partition_column_statistics(self, seqid, iprot, oprot): result = get_partition_column_statistics_result() try: result.success = self._handler.get_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 - except InvalidInputException as o3: + except InvalidInputException, o3: result.o3 = o3 - except InvalidObjectException as o4: + except InvalidObjectException, o4: result.o4 = o4 oprot.writeMessageBegin("get_partition_column_statistics", TMessageType.REPLY, seqid) result.write(oprot) @@ -6873,9 +6873,9 @@ def process_get_table_statistics_req(self, seqid, iprot, oprot): result = get_table_statistics_req_result() try: result.success = self._handler.get_table_statistics_req(args.request) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_table_statistics_req", TMessageType.REPLY, seqid) result.write(oprot) @@ -6889,9 +6889,9 @@ def process_get_partitions_statistics_req(self, seqid, iprot, oprot): result = get_partitions_statistics_req_result() try: result.success = self._handler.get_partitions_statistics_req(args.request) - 
except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_partitions_statistics_req", TMessageType.REPLY, seqid) result.write(oprot) @@ -6905,9 +6905,9 @@ def process_get_aggr_stats_for(self, seqid, iprot, oprot): result = get_aggr_stats_for_result() try: result.success = self._handler.get_aggr_stats_for(args.request) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("get_aggr_stats_for", TMessageType.REPLY, seqid) result.write(oprot) @@ -6921,13 +6921,13 @@ def process_set_aggr_stats_for(self, seqid, iprot, oprot): result = set_aggr_stats_for_result() try: result.success = self._handler.set_aggr_stats_for(args.request) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 - except InvalidInputException as o4: + except InvalidInputException, o4: result.o4 = o4 oprot.writeMessageBegin("set_aggr_stats_for", TMessageType.REPLY, seqid) result.write(oprot) @@ -6941,13 +6941,13 @@ def process_delete_partition_column_statistics(self, seqid, iprot, oprot): result = delete_partition_column_statistics_result() try: result.success = self._handler.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 - except InvalidObjectException as o3: + except InvalidObjectException, o3: result.o3 = o3 - except InvalidInputException as o4: + except InvalidInputException, o4: result.o4 = o4 oprot.writeMessageBegin("delete_partition_column_statistics", TMessageType.REPLY, seqid) result.write(oprot) @@ -6961,13 +6961,13 @@ def process_delete_table_column_statistics(self, seqid, iprot, oprot): result = delete_table_column_statistics_result() try: result.success = self._handler.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name) - except NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 - except InvalidObjectException as o3: + except InvalidObjectException, o3: result.o3 = o3 - except InvalidInputException as o4: + except InvalidInputException, o4: result.o4 = o4 oprot.writeMessageBegin("delete_table_column_statistics", TMessageType.REPLY, seqid) result.write(oprot) @@ -6981,13 +6981,13 @@ def process_create_function(self, seqid, iprot, oprot): result = create_function_result() try: self._handler.create_function(args.func) - except AlreadyExistsException as o1: + except AlreadyExistsException, o1: result.o1 = o1 - except InvalidObjectException as o2: + except InvalidObjectException, o2: result.o2 = o2 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 - except NoSuchObjectException as o4: + except NoSuchObjectException, o4: result.o4 = o4 oprot.writeMessageBegin("create_function", TMessageType.REPLY, seqid) result.write(oprot) @@ -7001,9 +7001,9 @@ def process_drop_function(self, seqid, iprot, oprot): result = drop_function_result() try: self._handler.drop_function(args.dbName, args.funcName) - except 
NoSuchObjectException as o1: + except NoSuchObjectException, o1: result.o1 = o1 - except MetaException as o3: + except MetaException, o3: result.o3 = o3 oprot.writeMessageBegin("drop_function", TMessageType.REPLY, seqid) result.write(oprot) @@ -7017,9 +7017,9 @@ def process_alter_function(self, seqid, iprot, oprot): result = alter_function_result() try: self._handler.alter_function(args.dbName, args.funcName, args.newFunc) - except InvalidOperationException as o1: + except InvalidOperationException, o1: result.o1 = o1 - except MetaException as o2: + except MetaException, o2: result.o2 = o2 oprot.writeMessageBegin("alter_function", TMessageType.REPLY, seqid) result.write(oprot) @@ -7033,7 +7033,7 @@ def process_get_functions(self, seqid, iprot, oprot): result = get_functions_result() try: result.success = self._handler.get_functions(args.dbName, args.pattern) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_functions", TMessageType.REPLY, seqid) result.write(oprot) @@ -7047,9 +7047,9 @@ def process_get_function(self, seqid, iprot, oprot): result = get_function_result() try: result.success = self._handler.get_function(args.dbName, args.funcName) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 - except NoSuchObjectException as o2: + except NoSuchObjectException, o2: result.o2 = o2 oprot.writeMessageBegin("get_function", TMessageType.REPLY, seqid) result.write(oprot) @@ -7063,7 +7063,7 @@ def process_create_role(self, seqid, iprot, oprot): result = create_role_result() try: result.success = self._handler.create_role(args.role) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("create_role", TMessageType.REPLY, seqid) result.write(oprot) @@ -7077,7 +7077,7 @@ def process_drop_role(self, seqid, iprot, oprot): result = drop_role_result() try: result.success = self._handler.drop_role(args.role_name) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("drop_role", TMessageType.REPLY, seqid) result.write(oprot) @@ -7091,7 +7091,7 @@ def process_get_role_names(self, seqid, iprot, oprot): result = get_role_names_result() try: result.success = self._handler.get_role_names() - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_role_names", TMessageType.REPLY, seqid) result.write(oprot) @@ -7105,7 +7105,7 @@ def process_grant_role(self, seqid, iprot, oprot): result = grant_role_result() try: result.success = self._handler.grant_role(args.role_name, args.principal_name, args.principal_type, args.grantor, args.grantorType, args.grant_option) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("grant_role", TMessageType.REPLY, seqid) result.write(oprot) @@ -7119,7 +7119,7 @@ def process_revoke_role(self, seqid, iprot, oprot): result = revoke_role_result() try: result.success = self._handler.revoke_role(args.role_name, args.principal_name, args.principal_type) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("revoke_role", TMessageType.REPLY, seqid) result.write(oprot) @@ -7133,7 +7133,7 @@ def process_list_roles(self, seqid, iprot, oprot): result = list_roles_result() try: result.success = self._handler.list_roles(args.principal_name, args.principal_type) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("list_roles", TMessageType.REPLY, seqid) 
result.write(oprot) @@ -7147,7 +7147,7 @@ def process_grant_revoke_role(self, seqid, iprot, oprot): result = grant_revoke_role_result() try: result.success = self._handler.grant_revoke_role(args.request) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("grant_revoke_role", TMessageType.REPLY, seqid) result.write(oprot) @@ -7161,7 +7161,7 @@ def process_get_principals_in_role(self, seqid, iprot, oprot): result = get_principals_in_role_result() try: result.success = self._handler.get_principals_in_role(args.request) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_principals_in_role", TMessageType.REPLY, seqid) result.write(oprot) @@ -7175,7 +7175,7 @@ def process_get_role_grants_for_principal(self, seqid, iprot, oprot): result = get_role_grants_for_principal_result() try: result.success = self._handler.get_role_grants_for_principal(args.request) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_role_grants_for_principal", TMessageType.REPLY, seqid) result.write(oprot) @@ -7189,7 +7189,7 @@ def process_get_privilege_set(self, seqid, iprot, oprot): result = get_privilege_set_result() try: result.success = self._handler.get_privilege_set(args.hiveObject, args.user_name, args.group_names) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_privilege_set", TMessageType.REPLY, seqid) result.write(oprot) @@ -7203,7 +7203,7 @@ def process_list_privileges(self, seqid, iprot, oprot): result = list_privileges_result() try: result.success = self._handler.list_privileges(args.principal_name, args.principal_type, args.hiveObject) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("list_privileges", TMessageType.REPLY, seqid) result.write(oprot) @@ -7217,7 +7217,7 @@ def process_grant_privileges(self, seqid, iprot, oprot): result = grant_privileges_result() try: result.success = self._handler.grant_privileges(args.privileges) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("grant_privileges", TMessageType.REPLY, seqid) result.write(oprot) @@ -7231,7 +7231,7 @@ def process_revoke_privileges(self, seqid, iprot, oprot): result = revoke_privileges_result() try: result.success = self._handler.revoke_privileges(args.privileges) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("revoke_privileges", TMessageType.REPLY, seqid) result.write(oprot) @@ -7245,7 +7245,7 @@ def process_grant_revoke_privileges(self, seqid, iprot, oprot): result = grant_revoke_privileges_result() try: result.success = self._handler.grant_revoke_privileges(args.request) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("grant_revoke_privileges", TMessageType.REPLY, seqid) result.write(oprot) @@ -7259,7 +7259,7 @@ def process_set_ugi(self, seqid, iprot, oprot): result = set_ugi_result() try: result.success = self._handler.set_ugi(args.user_name, args.group_names) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("set_ugi", TMessageType.REPLY, seqid) result.write(oprot) @@ -7273,7 +7273,7 @@ def process_get_delegation_token(self, seqid, iprot, oprot): result = get_delegation_token_result() try: result.success = self._handler.get_delegation_token(args.token_owner, args.renewer_kerberos_principal_name) - except 
MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("get_delegation_token", TMessageType.REPLY, seqid) result.write(oprot) @@ -7287,7 +7287,7 @@ def process_renew_delegation_token(self, seqid, iprot, oprot): result = renew_delegation_token_result() try: result.success = self._handler.renew_delegation_token(args.token_str_form) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("renew_delegation_token", TMessageType.REPLY, seqid) result.write(oprot) @@ -7301,7 +7301,7 @@ def process_cancel_delegation_token(self, seqid, iprot, oprot): result = cancel_delegation_token_result() try: self._handler.cancel_delegation_token(args.token_str_form) - except MetaException as o1: + except MetaException, o1: result.o1 = o1 oprot.writeMessageBegin("cancel_delegation_token", TMessageType.REPLY, seqid) result.write(oprot) @@ -7348,7 +7348,7 @@ def process_abort_txn(self, seqid, iprot, oprot): result = abort_txn_result() try: self._handler.abort_txn(args.rqst) - except NoSuchTxnException as o1: + except NoSuchTxnException, o1: result.o1 = o1 oprot.writeMessageBegin("abort_txn", TMessageType.REPLY, seqid) result.write(oprot) @@ -7362,9 +7362,9 @@ def process_commit_txn(self, seqid, iprot, oprot): result = commit_txn_result() try: self._handler.commit_txn(args.rqst) - except NoSuchTxnException as o1: + except NoSuchTxnException, o1: result.o1 = o1 - except TxnAbortedException as o2: + except TxnAbortedException, o2: result.o2 = o2 oprot.writeMessageBegin("commit_txn", TMessageType.REPLY, seqid) result.write(oprot) @@ -7378,9 +7378,9 @@ def process_lock(self, seqid, iprot, oprot): result = lock_result() try: result.success = self._handler.lock(args.rqst) - except NoSuchTxnException as o1: + except NoSuchTxnException, o1: result.o1 = o1 - except TxnAbortedException as o2: + except TxnAbortedException, o2: result.o2 = o2 oprot.writeMessageBegin("lock", TMessageType.REPLY, seqid) result.write(oprot) @@ -7394,11 +7394,11 @@ def process_check_lock(self, seqid, iprot, oprot): result = check_lock_result() try: result.success = self._handler.check_lock(args.rqst) - except NoSuchTxnException as o1: + except NoSuchTxnException, o1: result.o1 = o1 - except TxnAbortedException as o2: + except TxnAbortedException, o2: result.o2 = o2 - except NoSuchLockException as o3: + except NoSuchLockException, o3: result.o3 = o3 oprot.writeMessageBegin("check_lock", TMessageType.REPLY, seqid) result.write(oprot) @@ -7412,9 +7412,9 @@ def process_unlock(self, seqid, iprot, oprot): result = unlock_result() try: self._handler.unlock(args.rqst) - except NoSuchLockException as o1: + except NoSuchLockException, o1: result.o1 = o1 - except TxnOpenException as o2: + except TxnOpenException, o2: result.o2 = o2 oprot.writeMessageBegin("unlock", TMessageType.REPLY, seqid) result.write(oprot) @@ -7439,11 +7439,11 @@ def process_heartbeat(self, seqid, iprot, oprot): result = heartbeat_result() try: self._handler.heartbeat(args.ids) - except NoSuchLockException as o1: + except NoSuchLockException, o1: result.o1 = o1 - except NoSuchTxnException as o2: + except NoSuchTxnException, o2: result.o2 = o2 - except TxnAbortedException as o3: + except TxnAbortedException, o3: result.o3 = o3 oprot.writeMessageBegin("heartbeat", TMessageType.REPLY, seqid) result.write(oprot) @@ -7490,9 +7490,9 @@ def process_add_dynamic_partitions(self, seqid, iprot, oprot): result = add_dynamic_partitions_result() try: self._handler.add_dynamic_partitions(args.rqst) - except NoSuchTxnException as 
o1: + except NoSuchTxnException, o1: result.o1 = o1 - except TxnAbortedException as o2: + except TxnAbortedException, o2: result.o2 = o2 oprot.writeMessageBegin("add_dynamic_partitions", TMessageType.REPLY, seqid) result.write(oprot) diff --git ql/if/queryplan.thrift ql/if/queryplan.thrift index c8dfa35..7bf62c0 100644 --- ql/if/queryplan.thrift +++ ql/if/queryplan.thrift @@ -57,6 +57,7 @@ enum OperatorType { MUX, DEMUX, EVENT, + SPARKPRUNINGSINK, ORCFILEMERGE, RCFILEMERGE, MERGEJOIN, diff --git ql/src/gen/thrift/gen-cpp/queryplan_types.cpp ql/src/gen/thrift/gen-cpp/queryplan_types.cpp index 19d4806..33225ad 100644 --- ql/src/gen/thrift/gen-cpp/queryplan_types.cpp +++ ql/src/gen/thrift/gen-cpp/queryplan_types.cpp @@ -53,6 +53,7 @@ int _kOperatorTypeValues[] = { OperatorType::MUX, OperatorType::DEMUX, OperatorType::EVENT, + OperatorType::SPARKPRUNINGSINK, OperatorType::ORCFILEMERGE, OperatorType::RCFILEMERGE, OperatorType::MERGEJOIN @@ -80,11 +81,12 @@ const char* _kOperatorTypeNames[] = { "MUX", "DEMUX", "EVENT", + "SPARKPRUNINGSINK", "ORCFILEMERGE", "RCFILEMERGE", "MERGEJOIN" }; -const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(25, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); +const std::map<int, const char*> _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(26, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); int _kTaskTypeValues[] = { TaskType::MAP, @@ -203,12 +205,15 @@ uint32_t Adjacency::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Adjacency::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Adjacency"); + ++fcnt; xfer += oprot->writeFieldBegin("node", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->node); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("children", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->children.size())); @@ -221,6 +226,7 @@ uint32_t Adjacency::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("adjacencyType", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32((int32_t)this->adjacencyType); xfer += oprot->writeFieldEnd(); @@ -325,12 +331,15 @@ uint32_t Graph::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Graph::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Graph"); + ++fcnt; xfer += oprot->writeFieldBegin("nodeType", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32((int32_t)this->nodeType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("roots", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->roots.size())); @@ -343,6 +352,7 @@ uint32_t Graph::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("adjacencyList", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->adjacencyList.size())); @@ -485,16 +495,20 @@ uint32_t Operator::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Operator::write(::apache::thrift::protocol::TProtocol* oprot)
const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Operator"); + ++fcnt; xfer += oprot->writeFieldBegin("operatorId", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->operatorId); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("operatorType", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->operatorType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("operatorAttributes", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->operatorAttributes.size())); @@ -508,6 +522,7 @@ uint32_t Operator::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("operatorCounters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->operatorCounters.size())); @@ -521,10 +536,12 @@ uint32_t Operator::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("done", ::apache::thrift::protocol::T_BOOL, 5); xfer += oprot->writeBool(this->done); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("started", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool(this->started); xfer += oprot->writeFieldEnd(); @@ -690,16 +707,20 @@ uint32_t Task::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Task::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Task"); + ++fcnt; xfer += oprot->writeFieldBegin("taskId", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->taskId); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("taskType", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->taskType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("taskAttributes", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->taskAttributes.size())); @@ -713,6 +734,7 @@ uint32_t Task::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("taskCounters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->taskCounters.size())); @@ -727,11 +749,13 @@ uint32_t Task::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); if (this->__isset.operatorGraph) { + ++fcnt; xfer += oprot->writeFieldBegin("operatorGraph", ::apache::thrift::protocol::T_STRUCT, 5); xfer += this->operatorGraph.write(oprot); xfer += oprot->writeFieldEnd(); } if (this->__isset.operatorList) { + ++fcnt; xfer += oprot->writeFieldBegin("operatorList", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->operatorList.size())); @@ -744,10 +768,12 @@ uint32_t Task::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("done", ::apache::thrift::protocol::T_BOOL,
7); xfer += oprot->writeBool(this->done); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("started", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->started); xfer += oprot->writeFieldEnd(); @@ -907,16 +933,20 @@ uint32_t Stage::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Stage::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Stage"); + ++fcnt; xfer += oprot->writeFieldBegin("stageId", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->stageId); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("stageType", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->stageType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("stageAttributes", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->stageAttributes.size())); @@ -930,6 +960,7 @@ uint32_t Stage::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("stageCounters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->stageCounters.size())); @@ -943,6 +974,7 @@ uint32_t Stage::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("taskList", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->taskList.size())); @@ -955,10 +987,12 @@ uint32_t Stage::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("done", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool(this->done); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("started", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->started); xfer += oprot->writeFieldEnd(); @@ -1123,16 +1157,20 @@ uint32_t Query::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Query::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Query"); + ++fcnt; xfer += oprot->writeFieldBegin("queryId", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->queryId); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("queryType", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->queryType); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("queryAttributes", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->queryAttributes.size())); @@ -1146,6 +1184,7 @@ uint32_t Query::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("queryCounters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->queryCounters.size())); @@ -1159,10 +1198,12 @@ uint32_t Query::write(::apache::thrift::protocol::TProtocol* oprot)
const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("stageGraph", ::apache::thrift::protocol::T_STRUCT, 5); xfer += this->stageGraph.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("stageList", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->stageList.size())); @@ -1175,10 +1216,12 @@ uint32_t Query::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("done", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->done); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("started", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->started); xfer += oprot->writeFieldEnd(); @@ -1274,8 +1317,10 @@ uint32_t QueryPlan::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t QueryPlan::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("QueryPlan"); + ++fcnt; xfer += oprot->writeFieldBegin("queries", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->queries.size())); @@ -1288,10 +1333,12 @@ uint32_t QueryPlan::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("done", ::apache::thrift::protocol::T_BOOL, 2); xfer += oprot->writeBool(this->done); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("started", ::apache::thrift::protocol::T_BOOL, 3); xfer += oprot->writeBool(this->started); xfer += oprot->writeFieldEnd(); diff --git ql/src/gen/thrift/gen-cpp/queryplan_types.h ql/src/gen/thrift/gen-cpp/queryplan_types.h index ac73bc5..9341ef5 100644 --- ql/src/gen/thrift/gen-cpp/queryplan_types.h +++ ql/src/gen/thrift/gen-cpp/queryplan_types.h @@ -58,9 +58,10 @@ struct OperatorType { MUX = 19, DEMUX = 20, EVENT = 21, - ORCFILEMERGE = 22, - RCFILEMERGE = 23, - MERGEJOIN = 24 + SPARKPRUNINGSINK = 22, + ORCFILEMERGE = 23, + RCFILEMERGE = 24, + MERGEJOIN = 25 }; }; @@ -121,14 +122,17 @@ class Adjacency { void __set_node(const std::string& val) { node = val; + __isset.node = true; } void __set_children(const std::vector<std::string> & val) { children = val; + __isset.children = true; } void __set_adjacencyType(const AdjacencyType::type val) { adjacencyType = val; + __isset.adjacencyType = true; } bool operator == (const Adjacency & rhs) const @@ -180,14 +184,17 @@ class Graph { void __set_nodeType(const NodeType::type val) { nodeType = val; + __isset.nodeType = true; } void __set_roots(const std::vector<std::string> & val) { roots = val; + __isset.roots = true; } void __set_adjacencyList(const std::vector<Adjacency> & val) { adjacencyList = val; + __isset.adjacencyList = true; } bool operator == (const Graph & rhs) const @@ -245,26 +252,32 @@ class Operator { void __set_operatorId(const std::string& val) { operatorId = val; + __isset.operatorId = true; } void __set_operatorType(const OperatorType::type val) { operatorType = val; + __isset.operatorType = true; } void __set_operatorAttributes(const std::map<std::string, std::string> & val) { operatorAttributes = val; + __isset.operatorAttributes = true; } void __set_operatorCounters(const std::map<std::string, int64_t> & val) { operatorCounters = val; + __isset.operatorCounters = true; } void __set_done(const bool val) { done = val; + __isset.done = true; } void __set_started(const bool val)
{ started = val; + __isset.started = true; } bool operator == (const Operator & rhs) const @@ -332,18 +345,22 @@ class Task { void __set_taskId(const std::string& val) { taskId = val; + __isset.taskId = true; } void __set_taskType(const TaskType::type val) { taskType = val; + __isset.taskType = true; } void __set_taskAttributes(const std::map<std::string, std::string> & val) { taskAttributes = val; + __isset.taskAttributes = true; } void __set_taskCounters(const std::map<std::string, int64_t> & val) { taskCounters = val; + __isset.taskCounters = true; } void __set_operatorGraph(const Graph& val) { @@ -358,10 +375,12 @@ class Task { void __set_done(const bool val) { done = val; + __isset.done = true; } void __set_started(const bool val) { started = val; + __isset.started = true; } bool operator == (const Task & rhs) const @@ -435,30 +454,37 @@ class Stage { void __set_stageId(const std::string& val) { stageId = val; + __isset.stageId = true; } void __set_stageType(const StageType::type val) { stageType = val; + __isset.stageType = true; } void __set_stageAttributes(const std::map<std::string, std::string> & val) { stageAttributes = val; + __isset.stageAttributes = true; } void __set_stageCounters(const std::map<std::string, int64_t> & val) { stageCounters = val; + __isset.stageCounters = true; } void __set_taskList(const std::vector<Task> & val) { taskList = val; + __isset.taskList = true; } void __set_done(const bool val) { done = val; + __isset.done = true; } void __set_started(const bool val) { started = val; + __isset.started = true; } bool operator == (const Stage & rhs) const @@ -528,34 +554,42 @@ class Query { void __set_queryId(const std::string& val) { queryId = val; + __isset.queryId = true; } void __set_queryType(const std::string& val) { queryType = val; + __isset.queryType = true; } void __set_queryAttributes(const std::map<std::string, std::string> & val) { queryAttributes = val; + __isset.queryAttributes = true; } void __set_queryCounters(const std::map<std::string, int64_t> & val) { queryCounters = val; + __isset.queryCounters = true; } void __set_stageGraph(const Graph& val) { stageGraph = val; + __isset.stageGraph = true; } void __set_stageList(const std::vector<Stage> & val) { stageList = val; + __isset.stageList = true; } void __set_done(const bool val) { done = val; + __isset.done = true; } void __set_started(const bool val) { started = val; + __isset.started = true; } bool operator == (const Query & rhs) const @@ -617,14 +651,17 @@ class QueryPlan { void __set_queries(const std::vector<Query> & val) { queries = val; + __isset.queries = true; } void __set_done(const bool val) { done = val; + __isset.done = true; } void __set_started(const bool val) { started = val; + __isset.started = true; } bool operator == (const QueryPlan & rhs) const diff --git ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java index e18f935..a142fee 100644 --- ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java +++ ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java @@ -34,9 +34,10 @@ MUX(19), DEMUX(20), EVENT(21), - ORCFILEMERGE(22), - RCFILEMERGE(23), - MERGEJOIN(24); + SPARKPRUNINGSINK(22), + ORCFILEMERGE(23), + RCFILEMERGE(24), + MERGEJOIN(25); private final int value; @@ -102,10 +103,12 @@ public static OperatorType findByValue(int value) { case 21: return EVENT; case 22: - return ORCFILEMERGE; + return SPARKPRUNINGSINK; case 23: - return RCFILEMERGE; + return ORCFILEMERGE; case 24: + return RCFILEMERGE; + case 25: return MERGEJOIN; default: return null; diff --git 
ql/src/gen/thrift/gen-php/Types.php ql/src/gen/thrift/gen-php/Types.php index 7121ed4..d95f87b 100644 --- ql/src/gen/thrift/gen-php/Types.php +++ ql/src/gen/thrift/gen-php/Types.php @@ -57,9 +57,10 @@ final class OperatorType { const MUX = 19; const DEMUX = 20; const EVENT = 21; - const ORCFILEMERGE = 22; - const RCFILEMERGE = 23; - const MERGEJOIN = 24; + const SPARKPRUNINGSINK = 22; + const ORCFILEMERGE = 23; + const RCFILEMERGE = 24; + const MERGEJOIN = 25; static public $__names = array( 0 => 'JOIN', 1 => 'MAPJOIN', @@ -83,9 +84,10 @@ final class OperatorType { 19 => 'MUX', 20 => 'DEMUX', 21 => 'EVENT', - 22 => 'ORCFILEMERGE', - 23 => 'RCFILEMERGE', - 24 => 'MERGEJOIN', + 22 => 'SPARKPRUNINGSINK', + 23 => 'ORCFILEMERGE', + 24 => 'RCFILEMERGE', + 25 => 'MERGEJOIN', ); } diff --git ql/src/gen/thrift/gen-py/queryplan/ttypes.py ql/src/gen/thrift/gen-py/queryplan/ttypes.py index 53c0106..e4925c2 100644 --- ql/src/gen/thrift/gen-py/queryplan/ttypes.py +++ ql/src/gen/thrift/gen-py/queryplan/ttypes.py @@ -67,9 +67,10 @@ class OperatorType: MUX = 19 DEMUX = 20 EVENT = 21 - ORCFILEMERGE = 22 - RCFILEMERGE = 23 - MERGEJOIN = 24 + SPARKPRUNINGSINK = 22 + ORCFILEMERGE = 23 + RCFILEMERGE = 24 + MERGEJOIN = 25 _VALUES_TO_NAMES = { 0: "JOIN", @@ -94,9 +95,10 @@ class OperatorType: 19: "MUX", 20: "DEMUX", 21: "EVENT", - 22: "ORCFILEMERGE", - 23: "RCFILEMERGE", - 24: "MERGEJOIN", + 22: "SPARKPRUNINGSINK", + 23: "ORCFILEMERGE", + 24: "RCFILEMERGE", + 25: "MERGEJOIN", } _NAMES_TO_VALUES = { @@ -122,9 +124,10 @@ class OperatorType: "MUX": 19, "DEMUX": 20, "EVENT": 21, - "ORCFILEMERGE": 22, - "RCFILEMERGE": 23, - "MERGEJOIN": 24, + "SPARKPRUNINGSINK": 22, + "ORCFILEMERGE": 23, + "RCFILEMERGE": 24, + "MERGEJOIN": 25, } class TaskType: diff --git ql/src/gen/thrift/gen-rb/queryplan_types.rb ql/src/gen/thrift/gen-rb/queryplan_types.rb index c2c4220..b656eb9 100644 --- ql/src/gen/thrift/gen-rb/queryplan_types.rb +++ ql/src/gen/thrift/gen-rb/queryplan_types.rb @@ -43,11 +43,12 @@ module OperatorType MUX = 19 DEMUX = 20 EVENT = 21 - ORCFILEMERGE = 22 - RCFILEMERGE = 23 - MERGEJOIN = 24 - VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE", 24 => "MERGEJOIN"} - VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE, MERGEJOIN]).freeze + SPARKPRUNINGSINK = 22 + ORCFILEMERGE = 23 + RCFILEMERGE = 24 + MERGEJOIN = 25 + VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "SPARKPRUNINGSINK", 23 => "ORCFILEMERGE", 24 => "RCFILEMERGE", 25 => "MERGEJOIN"} + VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, 
LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, SPARKPRUNINGSINK, ORCFILEMERGE, RCFILEMERGE, MERGEJOIN]).freeze end module TaskType diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index 9867739..8c09ee0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -1024,10 +1024,10 @@ private void forward(Object[] keys, AggregationBuffer[] aggs) throws HiveExcepti public void flush() throws HiveException{ try { if (hashAggregations != null) { - if (isLogInfoEnabled) { - LOG.info("Begin Hash Table flush: size = " - + hashAggregations.size()); - } + if (isLogInfoEnabled) { + LOG.info("Begin Hash Table flush: size = " + + hashAggregations.size()); + } Iterator iter = hashAggregations.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<KeyWrapper, AggregationBuffer[]> m = (Map.Entry) iter diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java index 91e8a02..c05641b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java @@ -33,8 +33,11 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorSelectOperator; +import org.apache.hadoop.hive.ql.exec.vector.VectorSparkPartitionPruningSinkOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; +import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator; import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; import org.apache.hadoop.hive.ql.plan.CollectDesc; import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc; @@ -116,6 +119,8 @@ AppMasterEventOperator.class)); opvec.add(new OpTuple<DynamicPruningEventDesc>(DynamicPruningEventDesc.class, AppMasterEventOperator.class)); + opvec.add(new OpTuple<SparkPartitionPruningSinkDesc>(SparkPartitionPruningSinkDesc.class, + SparkPartitionPruningSinkOperator.class)); opvec.add(new OpTuple<RCFileMergeDesc>(RCFileMergeDesc.class, RCFileMergeOperator.class)); opvec.add(new OpTuple<OrcFileMergeDesc>(OrcFileMergeDesc.class, @@ -132,6 +137,9 @@ VectorAppMasterEventOperator.class)); vectorOpvec.add(new OpTuple<DynamicPruningEventDesc>(DynamicPruningEventDesc.class, VectorAppMasterEventOperator.class)); + vectorOpvec.add(new OpTuple<SparkPartitionPruningSinkDesc>( + SparkPartitionPruningSinkDesc.class, + VectorSparkPartitionPruningSinkOperator.class)); vectorOpvec.add(new OpTuple<SelectDesc>(SelectDesc.class, VectorSelectOperator.class)); vectorOpvec.add(new OpTuple<GroupByDesc>(GroupByDesc.class, VectorGroupByOperator.class)); vectorOpvec.add(new OpTuple<MapJoinDesc>(MapJoinDesc.class, VectorMapJoinOperator.class)); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java index 21398d8..007db75 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.Writable; import
org.apache.hive.spark.client.rpc.RpcConfiguration; import org.apache.spark.SparkConf; import org.apache.spark.SparkException; @@ -148,11 +149,11 @@ public static HiveSparkClient createHiveSparkClient(HiveConf hiveconf) Set<String> classes = Sets.newHashSet( Splitter.on(",").trimResults().omitEmptyStrings().split( Strings.nullToEmpty(sparkConf.get("spark.kryo.classesToRegister")))); + classes.add(Writable.class.getName()); classes.add(VectorizedRowBatch.class.getName()); classes.add(BytesWritable.class.getName()); classes.add(HiveKey.class.getName()); - sparkConf.put( - "spark.kryo.classesToRegister", Joiner.on(",").join(classes)); + sparkConf.put("spark.kryo.classesToRegister", Joiner.on(",").join(classes)); return sparkConf; }
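The HiveSparkClientFactory hunk above adds org.apache.hadoop.io.Writable to the Kryo registration list, presumably because the new pruning path now ships Writable values through Spark's serializer, and it collapses the sparkConf.put call onto one line. The merge logic it relies on is simple; a self-contained sketch of what it computes, with illustrative class names standing in for the real Guava calls:

```java
import java.util.LinkedHashSet;
import java.util.Set;

// Sketch of the registration-list handling above: merge any user-supplied
// spark.kryo.classesToRegister entries with the classes Hive always needs.
public class KryoRegisterDemo {
  public static void main(String[] args) {
    String userValue = "com.example.Foo, ,com.example.Bar"; // hypothetical user setting
    Set<String> classes = new LinkedHashSet<String>();
    for (String c : userValue.split(",")) {
      String t = c.trim();
      if (!t.isEmpty()) {
        classes.add(t);   // same effect as Splitter.trimResults().omitEmptyStrings()
      }
    }
    classes.add("org.apache.hadoop.io.Writable");       // the class this patch adds
    classes.add("org.apache.hadoop.io.BytesWritable");
    System.out.println(String.join(",", classes));      // same as Joiner.on(",").join(classes)
  }
}
```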
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java new file mode 100644 index 0000000..116934e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java @@ -0,0 +1,260 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.spark; + +import java.io.ObjectInputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.clearspring.analytics.util.Preconditions; +import javolution.testing.AssertionException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; +import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * The spark version of DynamicPartitionPruner. + */ +public class SparkDynamicPartitionPruner { + private static final Log LOG = LogFactory.getLog(SparkDynamicPartitionPruner.class); + private final Map<String, List<SourceInfo>> sourceInfoMap = new LinkedHashMap<String, List<SourceInfo>>(); + private final BytesWritable writable = new BytesWritable(); + + public void prune(MapWork work, JobConf jobConf) throws HiveException, SerDeException { + sourceInfoMap.clear(); + initialize(work, jobConf); + if (sourceInfoMap.size() == 0) { + // Nothing to prune for this MapWork + return; + } + processFiles(work, jobConf); + prunePartitions(work); + } + + public void initialize(MapWork work, JobConf jobConf) throws SerDeException { + Map<String, SourceInfo> columnMap = new HashMap<String, SourceInfo>(); + Set<String> sourceWorkIds = work.getEventSourceTableDescMap().keySet(); + + for (String id : sourceWorkIds) { + List<TableDesc> tables = work.getEventSourceTableDescMap().get(id); + List<String> columnNames = work.getEventSourceColumnNameMap().get(id); + List<ExprNodeDesc> partKeyExprs = work.getEventSourcePartKeyExprMap().get(id); + + Iterator<String> cit = columnNames.iterator(); + Iterator<ExprNodeDesc> pit = partKeyExprs.iterator(); + for (TableDesc t : tables) { + String columnName = cit.next(); + ExprNodeDesc partKeyExpr = pit.next(); + SourceInfo si = new SourceInfo(t, partKeyExpr, columnName, jobConf); + if (!sourceInfoMap.containsKey(id)) { + sourceInfoMap.put(id, new ArrayList<SourceInfo>()); + } + sourceInfoMap.get(id).add(si); + + // We could have multiple sources restrict the same column, need to take + // the union of the values in that case. + if (columnMap.containsKey(columnName)) { + si.values = columnMap.get(columnName).values; + } + columnMap.put(columnName, si); + } + } + }
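The tail of initialize() just above handles one subtlety: when two source works restrict the same partition column, the `si.values = columnMap.get(columnName).values` assignment makes their SourceInfo objects share a single value set, so values read later from either source accumulate into the union with no explicit merge step. A minimal demonstration of the aliasing idea, where `Source` is a hypothetical stand-in for SourceInfo:

```java
import java.util.HashSet;
import java.util.Set;

public class SharedSetUnionDemo {
  /** Hypothetical stand-in for SourceInfo; only the shared value set matters here. */
  static class Source {
    Set<Object> values = new HashSet<Object>();
  }

  public static void main(String[] args) {
    Source first = new Source();
    Source second = new Source();
    // Same aliasing as "si.values = columnMap.get(columnName).values":
    second.values = first.values;

    first.values.add("2015-01-01");   // values delivered by one source work
    second.values.add("2015-01-02");  // values delivered by another

    // Both adds landed in one set, i.e. the union; a partition survives
    // pruning if its key evaluates into this set.
    System.out.println(first.values); // [2015-01-01, 2015-01-02] (order may vary)
  }
}
```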
+ if (columnMap.containsKey(columnName)) { + si.values = columnMap.get(columnName).values; + } + columnMap.put(columnName, si); + } + } + } + + private void processFiles(MapWork work, JobConf jobConf) throws HiveException { + try { + Path baseDir = work.getTmpPathForPartitionPruning(); + FileSystem fs = FileSystem.get(baseDir.toUri(), jobConf); + ObjectInputStream in = null; + + // Find the SourceInfo to put values in. + for (String name : sourceInfoMap.keySet()) { + Path sourceDir = new Path(baseDir, name); + for (FileStatus fstatus : fs.listStatus(sourceDir)) { + LOG.info("Start processing pruning file: " + fstatus.getPath()); + in = new ObjectInputStream(fs.open(fstatus.getPath())); + String columnName = in.readUTF(); + SourceInfo info = null; + + for (SourceInfo si : sourceInfoMap.get(name)) { + if (columnName.equals(si.columnName)) { + info = si; + break; + } + } + + Preconditions.checkArgument(info != null, + "AssertionError: no source info for the column: " + columnName); + + // Read fields + while (in.available() > 0) { + writable.readFields(in); + + Object row = info.deserializer.deserialize(writable); + Object value = info.soi.getStructFieldData(row, info.field); + value = ObjectInspectorUtils.copyToStandardObject(value, info.fieldInspector); + info.values.add(value); + } + + // Close each pruning file as soon as it has been consumed; closing once + // after the loops would leak every stream but the last one. + in.close(); + } + } + } catch (Exception e) { + throw new HiveException(e); + } + } + + private void prunePartitions(MapWork work) throws HiveException { + for (String source : sourceInfoMap.keySet()) { + for (SourceInfo info : sourceInfoMap.get(source)) { + prunePartitionSingleSource(info, work); + } + } + } + + private void prunePartitionSingleSource(SourceInfo info, MapWork work) + throws HiveException { + Set<Object> values = info.values; + String columnName = info.columnName; + + ObjectInspector oi = + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(info.fieldInspector.getTypeName())); + + ObjectInspectorConverters.Converter converter = + ObjectInspectorConverters.getConverter( + PrimitiveObjectInspectorFactory.javaStringObjectInspector, oi); + + StructObjectInspector soi = + ObjectInspectorFactory.getStandardStructObjectInspector( + Collections.singletonList(columnName), Collections.singletonList(oi)); + + @SuppressWarnings("rawtypes") + ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(info.partKey); + eval.initialize(soi); + + applyFilterToPartitions(work, converter, eval, columnName, values); + } + + private void applyFilterToPartitions( + MapWork work, + ObjectInspectorConverters.Converter converter, + ExprNodeEvaluator eval, + String columnName, + Set<Object> values) throws HiveException { + + Object[] row = new Object[1]; + + Iterator<String> it = work.getPathToPartitionInfo().keySet().iterator(); + while (it.hasNext()) { + String p = it.next(); + PartitionDesc desc = work.getPathToPartitionInfo().get(p); + Map<String, String> spec = desc.getPartSpec(); + if (spec == null) { + throw new AssertionException("No partition spec found in dynamic pruning"); + } + + String partValueString = spec.get(columnName); + if (partValueString == null) { + throw new AssertionException("Could not find partition value for column: " + columnName); + } + + Object partValue = converter.convert(partValueString); + if (LOG.isDebugEnabled()) { + LOG.debug("Converted partition value: " + partValue + " original (" + partValueString + ")"); + } + + row[0] = partValue; + partValue = eval.evaluate(row); + if (LOG.isDebugEnabled()) { + LOG.debug("part key expr applied: " + partValue); + } + + if
(!values.contains(partValue)) { + LOG.info("Pruning path: " + p); + it.remove(); + work.getPathToAliases().remove(p); + work.getPaths().remove(p); + work.getPartitionDescs().remove(desc); + } + } + } + + @SuppressWarnings("deprecation") + private static class SourceInfo { + final ExprNodeDesc partKey; + final Deserializer deserializer; + final StructObjectInspector soi; + final StructField field; + final ObjectInspector fieldInspector; + Set<Object> values = new HashSet<Object>(); + final String columnName; + + SourceInfo(TableDesc table, ExprNodeDesc partKey, String columnName, JobConf jobConf) + throws SerDeException { + this.partKey = partKey; + this.columnName = columnName; + + deserializer = ReflectionUtils.newInstance(table.getDeserializerClass(), null); + deserializer.initialize(jobConf, table.getProperties()); + + ObjectInspector inspector = deserializer.getObjectInspector(); + LOG.debug("Type of obj insp: " + inspector.getTypeName()); + + soi = (StructObjectInspector) inspector; + List<? extends StructField> fields = soi.getAllStructFieldRefs(); + if (fields.size() > 1) { + LOG.error("expecting single field in input"); + } + + field = fields.get(0); + fieldInspector = + ObjectInspectorUtils.getStandardObjectInspector(field.getFieldObjectInspector()); + } + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java index e6c845c..3246bb1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkUtilities.java @@ -22,16 +22,22 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.UUID; +import java.util.Collection; +import com.google.common.base.Preconditions; import org.apache.commons.io.FilenameUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession; import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManager; import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.SparkWork; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.io.BytesWritable; import org.apache.spark.SparkConf; @@ -122,4 +128,54 @@ public static SparkSession getSparkSession(HiveConf conf, SessionState.get().setSparkSession(sparkSession); return sparkSession; } + + /** + * Generate a temporary path for dynamic partition pruning in the Spark branch. + * TODO: no longer need this if we use accumulator! + * @param basePath the base temporary path + * @param id the ID of the target work + * @return the temporary path for the given work ID + */ + public static Path generateTmpPathForPartitionPruning(Path basePath, String id) { + return new Path(basePath, id); + } + + /** + * Return the ID for this BaseWork, in String form.
+ * @param work the input BaseWork + * @return the unique ID for this BaseWork + */ + public static String getWorkId(BaseWork work) { + String workName = work.getName(); + return workName.substring(workName.indexOf(" ") + 1); + } + + public static SparkTask createSparkTask(HiveConf conf) { + return (SparkTask) TaskFactory.get( + new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID)), conf); + } + + public static SparkTask createSparkTask(SparkWork work, HiveConf conf) { + return (SparkTask) TaskFactory.get(work, conf); + } + + /** + * Recursively find all operators under root that are of class clazz, and + * put them in result. + * @param result all operators under root that are of class clazz + * @param root the root operator under which all operators will be examined + * @param clazz class to collect. Must NOT be null. + */ + public static void collectOp(Collection<Operator<?>> result, Operator<?> root, Class<?> clazz) { + Preconditions.checkArgument(clazz != null, "AssertionError: clazz should not be null"); + if (root == null) { + return; + } + if (clazz.equals(root.getClass())) { + result.add(root); + } + for (Operator<?> child : root.getChildOperators()) { + collectOp(result, child, clazz); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSparkPartitionPruningSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSparkPartitionPruningSinkOperator.java new file mode 100644 index 0000000..267aace --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSparkPartitionPruningSinkOperator.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector; + +import java.util.Collection; +import java.util.concurrent.Future; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; +import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator; +import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.io.ObjectWritable; +import org.apache.hadoop.io.Writable; + +/** + * Vectorized version of SparkPartitionPruningSinkOperator.
+ */ +public class VectorSparkPartitionPruningSinkOperator extends SparkPartitionPruningSinkOperator { + + private static final long serialVersionUID = 1L; + + protected transient Object[] singleRow; + + protected transient VectorExpressionWriter[] valueWriters; + + public VectorSparkPartitionPruningSinkOperator(VectorizationContext context, + OperatorDesc conf) { + super(); + this.conf = (SparkPartitionPruningSinkDesc) conf; + } + + public VectorSparkPartitionPruningSinkOperator() { + } + + @Override + public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException { + Collection<Future<?>> result = super.initializeOp(hconf); + valueWriters = VectorExpressionWriterFactory.getExpressionWriters( + (StructObjectInspector) inputObjInspectors[0]); + singleRow = new Object[valueWriters.length]; + return result; + } + + @Override + public void process(Object data, int tag) throws HiveException { + + VectorizedRowBatch vrg = (VectorizedRowBatch) data; + + Writable[] records = null; + Writable recordValue = null; + boolean vectorizedSerde = false; + + try { + if (serializer instanceof VectorizedSerde) { + recordValue = ((VectorizedSerde) serializer).serializeVector(vrg, + inputObjInspectors[0]); + records = (Writable[]) ((ObjectWritable) recordValue).get(); + vectorizedSerde = true; + } + } catch (SerDeException e1) { + throw new HiveException(e1); + } + + for (int i = 0; i < vrg.size; i++) { + Writable row = null; + if (vectorizedSerde) { + row = records[i]; + } else { + if (vrg.valueWriters == null) { + vrg.setValueWriters(this.valueWriters); + } + try { + row = serializer.serialize(getRowObject(vrg, i), inputObjInspectors[0]); + } catch (SerDeException ex) { + throw new HiveException(ex); + } + } + try { + row.write(buffer); + } catch (Exception e) { + throw new HiveException(e); + } + } + } + + private Object[] getRowObject(VectorizedRowBatch vrg, int rowIndex) + throws HiveException { + int batchIndex = rowIndex; + if (vrg.selectedInUse) { + batchIndex = vrg.selected[rowIndex]; + } + for (int i = 0; i < vrg.projectionSize; i++) { + ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; + singleRow[i] = vrg.valueWriters[i].writeValue(vectorColumn, batchIndex); + } + return singleRow; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java index 1de7e40..077cd6f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java @@ -25,6 +25,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -514,34 +515,37 @@ public int hashCode() { int numThreads = Math.min(MAX_CHECK_NONCOMBINABLE_THREAD_NUM, (int) Math.ceil((double) paths.length / DEFAULT_NUM_PATH_PER_THREAD)); int numPathPerThread = (int) Math.ceil((double) paths.length / numThreads); - LOG.info("Total number of paths: " + paths.length + - ", launching " + numThreads + " threads to check non-combinable ones."); - ExecutorService executor = Executors.newFixedThreadPool(numThreads); - List<Future<Set<Integer>>> futureList = new ArrayList<Future<Set<Integer>>>(numThreads); - try { - for (int i = 0; i < numThreads; i++) { - int start = i * numPathPerThread; - int length = i != numThreads - 1 ?
numPathPerThread : paths.length - start; - futureList.add(executor.submit( - new CheckNonCombinablePathCallable(paths, start, length, job))); - } - Set<Integer> nonCombinablePathIndices = new HashSet<Integer>(); - for (Future<Set<Integer>> future : futureList) { - nonCombinablePathIndices.addAll(future.get()); - } - for (int i = 0; i < paths.length; i++) { - if (nonCombinablePathIndices.contains(i)) { - nonCombinablePaths.add(paths[i]); - } else { - combinablePaths.add(paths[i]); + + if (numThreads > 0) { + LOG.info("Total number of paths: " + paths.length + + ", launching " + numThreads + " threads to check non-combinable ones."); + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + List<Future<Set<Integer>>> futureList = new ArrayList<Future<Set<Integer>>>(numThreads); + try { + for (int i = 0; i < numThreads; i++) { + int start = i * numPathPerThread; + int length = i != numThreads - 1 ? numPathPerThread : paths.length - start; + futureList.add(executor.submit( + new CheckNonCombinablePathCallable(paths, start, length, job))); + } + Set<Integer> nonCombinablePathIndices = new HashSet<Integer>(); + for (Future<Set<Integer>> future : futureList) { + nonCombinablePathIndices.addAll(future.get()); + } + for (int i = 0; i < paths.length; i++) { + if (nonCombinablePathIndices.contains(i)) { + nonCombinablePaths.add(paths[i]); + } else { + combinablePaths.add(paths[i]); + } } + } catch (Exception e) { + LOG.error("Error checking non-combinable path", e); + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS); + throw new IOException(e); + } finally { + executor.shutdownNow(); } - } catch (Exception e) { - LOG.error("Error checking non-combinable path", e); - perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.GET_SPLITS); - throw new IOException(e); - } finally { - executor.shutdownNow(); } // Store the previous value for the path specification diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 9d5730d..1e59be4 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -27,6 +27,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.Map.Entry; @@ -38,6 +39,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.ql.exec.spark.SparkDynamicPartitionPruner; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; @@ -267,6 +269,18 @@ protected void init(JobConf job) { } else { mrwork = Utilities.getMapWork(job); } + + // Prune partitions + if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark") + && HiveConf.getBoolVar(job, HiveConf.ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING)) { + SparkDynamicPartitionPruner pruner = new SparkDynamicPartitionPruner(); + try { + pruner.prune(mrwork, job); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + pathToPartitionInfo = mrwork.getPathToPartitionInfo(); } } @@ -310,18 +324,28 @@ private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, Job } Path[] getInputPaths(JobConf job) throws IOException { - Path[] dirs = FileInputFormat.getInputPaths(job); - if (dirs.length == 0) { - // on tez we're avoiding to duplicate the file info in FileInputFormat.
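Stepping back to the CombineHiveInputFormat change above: once dynamic partition pruning can remove every input path of a MapWork, paths.length may be 0, which makes numThreads 0, and Executors.newFixedThreadPool(0) throws IllegalArgumentException. The new "if (numThreads > 0)" guard presumably defends against exactly that. A standalone illustration of the failure mode, with made-up constants standing in for the real ones:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ZeroThreadPoolDemo {
  public static void main(String[] args) {
    int numPaths = 0;         // every partition was pruned away
    int maxThreads = 10;      // stand-in for MAX_CHECK_NONCOMBINABLE_THREAD_NUM
    int pathsPerThread = 100; // stand-in for DEFAULT_NUM_PATH_PER_THREAD
    int numThreads = Math.min(maxThreads,
        (int) Math.ceil((double) numPaths / pathsPerThread));

    if (numThreads > 0) {
      ExecutorService executor = Executors.newFixedThreadPool(numThreads);
      executor.shutdownNow();
    } else {
      // Executors.newFixedThreadPool(0) would throw IllegalArgumentException,
      // so the guarded version simply skips the combinability check.
      System.out.println("no input paths, skipping non-combinable check");
    }
  }
}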
- if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - try { - List<Path> paths = Utilities.getInputPathsTez(job, mrwork); - dirs = paths.toArray(new Path[paths.size()]); - } catch (Exception e) { - throw new IOException("Could not create input files", e); + Path[] dirs; + if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) { + Set<String> pathStrings = mrwork.getPathToPartitionInfo().keySet(); + dirs = new Path[pathStrings.size()]; + Iterator<String> it = pathStrings.iterator(); + for (int i = 0; i < dirs.length; i++) { + dirs[i] = new Path(it.next()); + } + } else { + dirs = FileInputFormat.getInputPaths(job); + if (dirs.length == 0) { + // on tez we're avoiding to duplicate the file info in FileInputFormat. + if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { + try { + List<Path> paths = Utilities.getInputPathsTez(job, mrwork); + dirs = paths.toArray(new Path[paths.size()]); + } catch (Exception e) { + throw new IOException("Could not create input files", e); + } + } else { + throw new IOException("No input paths specified in job"); } - } else { - throw new IOException("No input paths specified in job"); } } return dirs; diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index ea5efe5..4f93aa6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -161,7 +161,7 @@ public void initialize(HiveConf hiveConf) { if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) { transformations.add(new StatsOptimizer()); } - if (isSparkExecEngine || (pctx.getContext().getExplain() && !isTezExecEngine)) { + if (pctx.getContext().getExplain() && !isTezExecEngine && !isSparkExecEngine) { transformations.add(new AnnotateWithStatistics()); transformations.add(new AnnotateWithOpTraits()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkDynamicPartitionPruningOptimization.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkDynamicPartitionPruningOptimization.java new file mode 100644 index 0000000..8db93bd --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkDynamicPartitionPruningOptimization.java @@ -0,0 +1,343 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.FilterOperator; +import org.apache.hadoop.hive.ql.exec.GroupByOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorFactory; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.SelectOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.GraphWalker; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.lib.RuleRegExp; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; +import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.spark.OptimizeSparkProcContext; +import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDynamicListDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.SelectDesc; + +/** + * This optimization looks for expressions of the kind "x IN (RS[n])". If such + * an expression made it to a table scan operator and x is a partition column we + * can use an existing join to dynamically prune partitions. This class sets up + * the infrastructure for that. + * + * Copied from DynamicPartitionPruningOptimization + */ +public class SparkDynamicPartitionPruningOptimization implements NodeProcessor { + + static final private Log LOG = LogFactory.getLog(SparkDynamicPartitionPruningOptimization.class + .getName()); + + public static class DynamicPartitionPrunerProc implements NodeProcessor { + + /** + * process simply remembers all the dynamic partition pruning expressions + * found + */ + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + ExprNodeDynamicListDesc desc = (ExprNodeDynamicListDesc) nd; + DynamicPartitionPrunerContext context = (DynamicPartitionPrunerContext) procCtx; + + // Rule is searching for dynamic pruning expr. There's at least an IN + // expression wrapping it. + ExprNodeDesc parent = (ExprNodeDesc) stack.get(stack.size() - 2); + ExprNodeDesc grandParent = stack.size() >= 3 ? 
+ (ExprNodeDesc) stack.get(stack.size() - 3) : null; + + context.addDynamicList(desc, parent, grandParent, (ReduceSinkOperator) desc.getSource()); + + return context; + } + } + + private static class DynamicListContext { + public ExprNodeDynamicListDesc desc; + public ExprNodeDesc parent; + public ExprNodeDesc grandParent; + public ReduceSinkOperator generator; + + public DynamicListContext(ExprNodeDynamicListDesc desc, ExprNodeDesc parent, + ExprNodeDesc grandParent, ReduceSinkOperator generator) { + this.desc = desc; + this.parent = parent; + this.grandParent = grandParent; + this.generator = generator; + } + } + + private static class DynamicPartitionPrunerContext implements NodeProcessorCtx, + Iterable<DynamicListContext> { + public List<DynamicListContext> dynLists = new ArrayList<DynamicListContext>(); + + public void addDynamicList(ExprNodeDynamicListDesc desc, ExprNodeDesc parent, + ExprNodeDesc grandParent, ReduceSinkOperator generator) { + dynLists.add(new DynamicListContext(desc, parent, grandParent, generator)); + } + + @Override + public Iterator<DynamicListContext> iterator() { + return dynLists.iterator(); + } + } + + private String extractColName(ExprNodeDesc root) { + if (root instanceof ExprNodeColumnDesc) { + return ((ExprNodeColumnDesc) root).getColumn(); + } else { + if (root.getChildren() == null) { + return null; + } + + String column = null; + for (ExprNodeDesc d : root.getChildren()) { + String candidate = extractColName(d); + if (column != null && candidate != null) { + return null; + } else if (candidate != null) { + column = candidate; + } + } + return column; + } + } + + @Override + public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... no) + throws SemanticException { + OptimizeSparkProcContext context = (OptimizeSparkProcContext) procCtx; + ParseContext parseContext = context.getParseContext(); + + FilterOperator filter = (FilterOperator) nd; + FilterDesc desc = filter.getConf(); + TableScanOperator ts = null; + + DynamicPartitionPrunerContext removerContext = new DynamicPartitionPrunerContext(); + + if (filter.getParentOperators().size() == 1 + && filter.getParentOperators().get(0) instanceof TableScanOperator) { + ts = (TableScanOperator) filter.getParentOperators().get(0); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Parent: " + filter.getParentOperators().get(0)); + LOG.debug("Filter: " + desc.getPredicateString()); + LOG.debug("TableScan: " + ts); + } + + // collect the dynamic pruning conditions + removerContext.dynLists.clear(); + walkExprTree(desc.getPredicate(), removerContext); + + for (DynamicListContext ctx : removerContext) { + String column = extractColName(ctx.parent); + + if (ts != null && column != null) { + Table table = ts.getConf().getTableMetadata(); + + if (table != null && table.isPartitionKey(column)) { + String alias = ts.getConf().getAlias(); + PrunedPartitionList plist = parseContext.getPrunedPartitions(alias, ts); + if (LOG.isDebugEnabled()) { + LOG.debug("alias: " + alias); + LOG.debug("pruned partition list: "); + if (plist != null) { + for (Partition p : plist.getPartitions()) { + LOG.debug(p.getCompleteName()); + } + } + } + if (plist == null || plist.getPartitions().size() != 0) { + LOG.info("Dynamic partitioning: " + table.getCompleteName() + "."
+ column); + generatePartitionPrunerPlan(ctx, parseContext, ts, column); + } else { + // all partitions have been statically removed + LOG.debug("No partition pruning necessary."); + } + } else { + LOG.debug("Column " + column + " is not a partition column"); + } + } + + // we always remove the condition by replacing it with "true" + ExprNodeDesc constNode = new ExprNodeConstantDesc(ctx.parent.getTypeInfo(), true); + if (ctx.grandParent == null) { + desc.setPredicate(constNode); + } else { + int i = ctx.grandParent.getChildren().indexOf(ctx.parent); + ctx.grandParent.getChildren().remove(i); + ctx.grandParent.getChildren().add(i, constNode); + } + } + + // if we pushed the predicate into the table scan we need to remove the + // synthetic conditions there. + cleanTableScanFilters(ts); + + return false; + } + + private void cleanTableScanFilters(TableScanOperator ts) throws SemanticException { + + if (ts == null || ts.getConf() == null || ts.getConf().getFilterExpr() == null) { + // nothing to do + return; + } + + DynamicPartitionPrunerContext removerContext = new DynamicPartitionPrunerContext(); + + // collect the dynamic pruning conditions + removerContext.dynLists.clear(); + walkExprTree(ts.getConf().getFilterExpr(), removerContext); + + for (DynamicListContext ctx : removerContext) { + // remove the condition by replacing it with "true" + ExprNodeDesc constNode = new ExprNodeConstantDesc(ctx.parent.getTypeInfo(), true); + if (ctx.grandParent == null) { + // we're the only node, just clear out the expression + ts.getConf().setFilterExpr(null); + } else { + int i = ctx.grandParent.getChildren().indexOf(ctx.parent); + ctx.grandParent.getChildren().remove(i); + ctx.grandParent.getChildren().add(i, constNode); + } + } + } + + private void generatePartitionPrunerPlan(DynamicListContext ctx, ParseContext parseContext, + TableScanOperator ts, String column) { + + // we will put a fork in the plan at the source of the reduce sink + Operator<?> parentOfRS = ctx.generator.getParentOperators().get(0); + + // we need the expr that generated the key of the reduce sink + ExprNodeDesc key = ctx.generator.getConf().getKeyCols().get(ctx.desc.getKeyIndex()); + + // we also need the expr for the partitioned table + ExprNodeDesc partKey = ctx.parent.getChildren().get(0); + + if (LOG.isDebugEnabled()) { + LOG.debug("key expr: " + key); + LOG.debug("partition key expr: " + partKey); + } + + List<ExprNodeDesc> keyExprs = new ArrayList<ExprNodeDesc>(); + keyExprs.add(key); + + // group by requires "ArrayList", don't ask.
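To make the plan fragment built around this point concrete: on the small-table side the method creates the chain parentOfRS -> SELECT(key) -> GROUP BY(key) -> SparkPartitionPruningSink, and the group-by exists purely to deduplicate the join key values before they are written to the temporary HDFS file. A JDK-only sketch of that dedup step; the real work is done by Hive's hash-mode GroupByOperator, and the dates here are invented:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class DedupKeyValues {
  public static void main(String[] args) {
    // Join key values flowing out of the small table, duplicates included.
    List<String> keyValues = Arrays.asList("2015-01-01", "2015-01-02", "2015-01-01");

    // GroupByDesc.Mode.HASH on the key collapses duplicates, much like this:
    Set<String> distinct = new LinkedHashSet<String>(keyValues);

    // Only distinct values need to reach the pruning sink.
    System.out.println(distinct); // [2015-01-01, 2015-01-02]
  }
}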
+ ArrayList<String> outputNames = new ArrayList<String>(); + outputNames.add(HiveConf.getColumnInternalName(0)); + + // project the relevant key column + SelectDesc select = new SelectDesc(keyExprs, outputNames); + SelectOperator selectOp = + (SelectOperator) OperatorFactory.getAndMakeChild(select, parentOfRS); + + // do a group by on the list to dedup + float groupByMemoryUsage = + HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float memoryThreshold = + HiveConf.getFloatVar(parseContext.getConf(), + HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + + ArrayList<ExprNodeDesc> groupByExprs = new ArrayList<ExprNodeDesc>(); + ExprNodeDesc groupByExpr = + new ExprNodeColumnDesc(key.getTypeInfo(), outputNames.get(0), null, false); + groupByExprs.add(groupByExpr); + + GroupByDesc groupBy = + new GroupByDesc(GroupByDesc.Mode.HASH, outputNames, groupByExprs, + new ArrayList<AggregationDesc>(), false, groupByMemoryUsage, memoryThreshold, + null, false, 0, true); + + GroupByOperator groupByOp = + (GroupByOperator) OperatorFactory.getAndMakeChild(groupBy, selectOp); + + Map<String, ExprNodeDesc> colMap = new HashMap<String, ExprNodeDesc>(); + colMap.put(outputNames.get(0), groupByExpr); + groupByOp.setColumnExprMap(colMap); + + // finally add partition pruner sink operator + SparkPartitionPruningSinkDesc desc = new SparkPartitionPruningSinkDesc(); + desc.setTableScan(ts); + desc.setTable(PlanUtils.getReduceValueTableDesc(PlanUtils + .getFieldSchemasFromColumnList(keyExprs, "key"))); + desc.setTargetColumnName(column); + desc.setPartKey(partKey); + + OperatorFactory.getAndMakeChild(desc, groupByOp); + } + + private Map<Node, Object> walkExprTree(ExprNodeDesc pred, NodeProcessorCtx ctx) + throws SemanticException { + + // create a walker which walks the tree in a DFS manner while maintaining + // the operator stack. The dispatcher + // generates the plan from the operator tree + Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>(); + exprRules.put(new RuleRegExp("R1", ExprNodeDynamicListDesc.class.getName() + "%"), + new DynamicPartitionPrunerProc()); + + // The dispatcher fires the processor corresponding to the closest matching + // rule and passes the context along + Dispatcher disp = new DefaultRuleDispatcher(null, exprRules, ctx); + GraphWalker egw = new DefaultGraphWalker(disp); + + List<Node> startNodes = new ArrayList<Node>(); + startNodes.add(pred); + + HashMap<Node, Object> outputMap = new HashMap<Node, Object>(); + egw.startWalking(startNodes, outputMap); + return outputMap; + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java new file mode 100644 index 0000000..15694f5 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkRemoveDynamicPruningBySize.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.Stack; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; +import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.spark.OptimizeSparkProcContext; +import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator; +import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; +import org.apache.hadoop.hive.ql.plan.DynamicPruningEventDesc; + +/** + * If we expect the number of keys for dynamic pruning to be too large we + * disable it. + * + * Cloned from RemoveDynamicPruningBySize, adjusted for the Spark branch. + */ +public class SparkRemoveDynamicPruningBySize implements NodeProcessor { + + static final private Log LOG = + LogFactory.getLog(SparkRemoveDynamicPruningBySize.class.getName()); + + @Override + public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, + Object... nodeOutputs) + throws SemanticException { + + OptimizeSparkProcContext context = (OptimizeSparkProcContext) procContext; + + SparkPartitionPruningSinkOperator sinkOp = (SparkPartitionPruningSinkOperator) nd; + SparkPartitionPruningSinkDesc desc = sinkOp.getConf(); + + if (desc.getStatistics().getDataSize() > context.getConf() + .getLongVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE)) { + Operator<?> child = sinkOp; + Operator<?> curr = sinkOp; + + while (curr.getChildOperators().size() <= 1) { + child = curr; + curr = curr.getParentOperators().get(0); + } + // at this point we've found the fork in the op pipeline that has the + // pruning as a child plan. + LOG.info("Disabling dynamic pruning for: " + desc.getTableScan().getName() + + ". Expected data size is too big: " + desc.getStatistics().getDataSize()); + curr.removeChild(child); + } + return false; + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java index 8e56263..de251db 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java @@ -131,11 +131,15 @@ private boolean containsOp(SparkWork sparkWork, Class<?> clazz) { // of its parent SparkWorks for the small tables private final Map<SparkTask, List<SparkTask>> dependencyGraph; + // To prevent newly generated SparkTasks from being processed again. + private final Set<SparkTask> generatedTaskSet; + public SparkMapJoinTaskDispatcher(PhysicalContext pc) { super(); physicalContext = pc; sparkWorkMap = new LinkedHashMap(); dependencyGraph = new LinkedHashMap<SparkTask, List<SparkTask>>(); + generatedTaskSet = new HashSet<SparkTask>(); } // Move the specified work from the sparkWork to the targetWork @@ -292,6 +296,7 @@ private SparkTask createSparkTask(SparkTask originalTask, } createdTaskMap.put(sparkWork, resultTask); + generatedTaskSet.add(resultTask); return resultTask; } @@ -299,7 +304,7 @@ private SparkTask createSparkTask(SparkTask originalTask, public Object dispatch(Node nd, Stack<Node> stack, Object...
nos) throws SemanticException { Task currentTask = (Task) nd; - if(currentTask.isMapRedTask()) { + if(currentTask.isMapRedTask() && !generatedTaskSet.contains(currentTask)) { if (currentTask instanceof ConditionalTask) { List> taskList = ((ConditionalTask) currentTask).getListTasks(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index 5f731d7..a1052c1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -874,6 +874,7 @@ boolean validateMapWorkOperator(Operator op, MapWork mWo case FILESINK: case LIMIT: case EVENT: + case SPARKPRUNINGSINK: ret = true; break; default: @@ -916,6 +917,7 @@ boolean validateReduceWorkOperator(Operator op) { break; case LIMIT: case EVENT: + case SPARKPRUNINGSINK: ret = true; break; default: @@ -1307,6 +1309,7 @@ private void fixupParentChildOperators(Operator op, case LIMIT: case EXTRACT: case EVENT: + case SPARKPRUNINGSINK: vectorOp = OperatorFactory.getVectorOperator(op.getConf(), vContext); break; default: diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkPartitionPruningSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkPartitionPruningSinkDesc.java new file mode 100644 index 0000000..cc78227 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkPartitionPruningSinkDesc.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.optimizer.spark; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; + +@Explain(displayName = "Spark Partition Pruning Sink Operator") +public class SparkPartitionPruningSinkDesc extends AbstractOperatorDesc { + + // column in the target table that will be pruned against + private String targetColumnName; + + private TableDesc table; + + private transient TableScanOperator tableScan; + + // the partition column we're interested in + private ExprNodeDesc partKey; + + private Path path; + + private String targetWork; + + @Explain(displayName = "tmp Path", explainLevels = { Explain.Level.EXTENDED }) + public Path getPath() { + return path; + } + + public void setPath(Path path) { + this.path = path; + } + + @Explain(displayName = "target work") + public String getTargetWork() { + return this.targetWork; + } + + public void setTargetWork(String targetWork) { + this.targetWork = targetWork; + } + + public TableScanOperator getTableScan() { + return tableScan; + } + + public void setTableScan(TableScanOperator tableScan) { + this.tableScan = tableScan; + } + + @Explain(displayName = "target column name") + public String getTargetColumnName() { + return targetColumnName; + } + + public void setTargetColumnName(String targetColumnName) { + this.targetColumnName = targetColumnName; + } + + public ExprNodeDesc getPartKey() { + return partKey; + } + + public void setPartKey(ExprNodeDesc partKey) { + this.partKey = partKey; + } + + public TableDesc getTable() { + return table; + } + + public void setTable(TableDesc table) { + this.table = table; + } + + @Explain(displayName = "partition key expr") + public String getPartKeyString() { + return partKey.getExprString(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java index 447f104..0a0c791 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkProcContext.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.UnionOperator; import org.apache.hadoop.hive.ql.exec.spark.SparkTask; +import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -40,7 +41,6 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceWork; import org.apache.hadoop.hive.ql.plan.SparkEdgeProperty; -import org.apache.hadoop.hive.ql.plan.SparkWork; import java.io.Serializable; import java.util.HashMap; @@ -138,6 +138,13 @@ // This is necessary as sometimes semantic analyzer's mapping is different than operator's own alias. 
public final Map<String, Operator<? extends OperatorDesc>> topOps; + // The set of pruning sinks + public final Set<Operator<?>> pruningSinkSet; + + // The set of TableScanOperators for pruning OP trees + public final Set<Operator<?>> clonedPruningTableScanSet; + + @SuppressWarnings("unchecked") public GenSparkProcContext(HiveConf conf, ParseContext parseContext, @@ -153,8 +160,7 @@ public GenSparkProcContext(HiveConf conf, this.inputs = inputs; this.outputs = outputs; this.topOps = topOps; - this.currentTask = (SparkTask) TaskFactory.get( - new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID)), conf); + this.currentTask = SparkUtilities.createSparkTask(conf); this.rootTasks.add(currentTask); this.leafOpToFollowingWorkInfo = new LinkedHashMap<ReduceSinkOperator, ObjectPair<SparkEdgeProperty, ReduceWork>>(); @@ -177,5 +183,7 @@ public GenSparkProcContext(HiveConf conf, this.clonedReduceSinks = new LinkedHashSet<ReduceSinkOperator>(); this.fileSinkSet = new LinkedHashSet<FileSinkOperator>(); this.fileSinkMap = new LinkedHashMap<FileSinkOperator, List<FileSinkOperator>>(); + this.pruningSinkSet = new LinkedHashSet<Operator<?>>(); + this.clonedPruningTableScanSet = new LinkedHashSet<Operator<?>>(); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java index e27ce0d..72ff330 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java @@ -46,7 +46,9 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.UnionOperator; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; +import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; import org.apache.hadoop.hive.ql.optimizer.spark.SparkSortMergeJoinFactory; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; @@ -61,6 +63,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import org.apache.hadoop.hive.ql.plan.TableDesc; /** * GenSparkUtils is a collection of shared helper methods to produce SparkWork @@ -132,7 +135,7 @@ protected void setupReduceSink(GenSparkProcContext context, ReduceWork reduceWor // remember which parent belongs to which tag reduceWork.getTagToInput().put(reduceSink.getConf().getTag(), - context.preceedingWork.getName()); + context.preceedingWork.getName()); // remember the output name of the reduce sink reduceSink.getConf().setOutputName(reduceWork.getName()); @@ -218,6 +221,13 @@ public void removeUnionOperators(Configuration conf, GenSparkProcContext context Iterator<Operator<?>> newOpQueueIt = newOpQueue.iterator(); for (Operator<?> op : opQueue) { Operator<?> newOp = newOpQueueIt.next(); + + // We need to update rootToWorkMap in case the op is a key + if (context.rootToWorkMap.containsKey(op)) { + context.rootToWorkMap.put(newOp, context.rootToWorkMap.get(op)); + } + // Don't remove the old entry + if (op instanceof FileSinkOperator) { List<FileSinkOperator> fileSinkList = context.fileSinkMap.get(op); if (fileSinkList == null) { @@ -225,6 +235,12 @@ public void removeUnionOperators(Configuration conf, GenSparkProcContext context } fileSinkList.add((FileSinkOperator) newOp); context.fileSinkMap.put((FileSinkOperator) op, fileSinkList); + } else if (op instanceof SparkPartitionPruningSinkOperator) { + SparkPartitionPruningSinkOperator oldPruningSink = (SparkPartitionPruningSinkOperator) op; + SparkPartitionPruningSinkOperator newPruningSink = (SparkPartitionPruningSinkOperator) newOp; +
newPruningSink.getConf().setTableScan(oldPruningSink.getConf().getTableScan()); + context.pruningSinkSet.add(newPruningSink); + context.pruningSinkSet.remove(oldPruningSink); } } } @@ -337,6 +353,67 @@ public void processFileSink(GenSparkProcContext context, FileSinkOperator fileSi } } + /** + * Populate partition pruning information from the pruning sink operator to the + * target MapWork (the MapWork for the big table side). The information includes the source table + * name, column name, and partition key expression. It also sets up the temporary path used to + * communicate between the target MapWork and the source BaseWork. + * + * Here "source" refers to the small table side, while "target" refers to the big + * table side. + * + * @param context the GenSpark processing context. + * @param pruningSink the pruning sink operator being processed. + */ + public void processPartitionPruningSink(GenSparkProcContext context, + SparkPartitionPruningSinkOperator pruningSink) { + SparkPartitionPruningSinkDesc desc = pruningSink.getConf(); + TableScanOperator ts = desc.getTableScan(); + MapWork targetWork = (MapWork) context.rootToWorkMap.get(ts); + + if (targetWork == null) { + throw new AssertionError("No targetWork found for tablescan " + ts); + } + + String targetId = SparkUtilities.getWorkId(targetWork); + + BaseWork sourceWork = getEnclosingWork(pruningSink, context); + String sourceId = SparkUtilities.getWorkId(sourceWork); + + // set up temporary path to communicate between the small/big table + Path tmpPath = targetWork.getTmpPathForPartitionPruning(); + if (tmpPath == null) { + Path baseTmpPath = context.parseContext.getContext().getMRTmpPath(); + tmpPath = SparkUtilities.generateTmpPathForPartitionPruning(baseTmpPath, targetId); + targetWork.setTmpPathForPartitionPruning(tmpPath); + LOG.info("Setting tmp path between source work and target work:\n" + tmpPath); + } + + desc.setPath(new Path(tmpPath, sourceId)); + desc.setTargetWork(targetWork.getName()); + + // store table descriptor in map-targetWork + if (!targetWork.getEventSourceTableDescMap().containsKey(sourceId)) { + targetWork.getEventSourceTableDescMap().put(sourceId, new LinkedList<TableDesc>()); + } + List<TableDesc> tables = targetWork.getEventSourceTableDescMap().get(sourceId); + tables.add(pruningSink.getConf().getTable()); + + // store column name in map-targetWork + if (!targetWork.getEventSourceColumnNameMap().containsKey(sourceId)) { + targetWork.getEventSourceColumnNameMap().put(sourceId, new LinkedList<String>()); + } + List<String> columns = targetWork.getEventSourceColumnNameMap().get(sourceId); + columns.add(desc.getTargetColumnName()); + + // store partition key expr in map-targetWork + if (!targetWork.getEventSourcePartKeyExprMap().containsKey(sourceId)) { + targetWork.getEventSourcePartKeyExprMap().put(sourceId, new LinkedList<ExprNodeDesc>()); + } + List<ExprNodeDesc> keys = targetWork.getEventSourcePartKeyExprMap().get(sourceId); + keys.add(desc.getPartKey()); + } + public static SparkEdgeProperty getEdgeProperty(ReduceSinkOperator reduceSink, ReduceWork reduceWork) throws SemanticException { SparkEdgeProperty edgeProperty = new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE); @@ -490,4 +567,33 @@ private static boolean hasGBYOperator(ReduceSinkOperator rs) { } return false; } + + /** + * getEnclosingWork finds the BaseWork any given operator belongs to.
+ */ + public BaseWork getEnclosingWork(Operator<?> op, GenSparkProcContext procCtx) { + List<Operator<?>> ops = new ArrayList<Operator<?>>(); + findRoots(op, ops); + for (Operator<?> r : ops) { + BaseWork work = procCtx.rootToWorkMap.get(r); + if (work != null) { + return work; + } + } + return null; + } + + /* + * findRoots returns all root operators (in ops) that result in operator op + */ + private void findRoots(Operator<?> op, List<Operator<?>> ops) { + List<Operator<?>> parents = op.getParentOperators(); + if (parents == null || parents.isEmpty()) { + ops.add(op); + return; + } + for (Operator<?> p : parents) { + findRoots(p, ops); + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/OptimizeSparkProcContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/OptimizeSparkProcContext.java index f7586a4..3b71af1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/spark/OptimizeSparkProcContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/OptimizeSparkProcContext.java @@ -20,15 +20,12 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; -import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -49,19 +46,12 @@ private final Set<ReduceSinkOperator> visitedReduceSinks = new HashSet<ReduceSinkOperator>(); private final Map<MapJoinOperator, Long> mjOpSizes = new HashMap<MapJoinOperator, Long>(); - // rootOperators are all the table scan operators in sequence - // of traversal - private final Deque<Operator<? extends OperatorDesc>> rootOperators; - public OptimizeSparkProcContext(HiveConf conf, ParseContext parseContext, - Set<ReadEntity> inputs, Set<WriteEntity> outputs, - Deque<Operator<? extends OperatorDesc>> rootOperators) { - + public OptimizeSparkProcContext(HiveConf conf, ParseContext parseContext, + Set<ReadEntity> inputs, Set<WriteEntity> outputs) { this.conf = conf; this.parseContext = parseContext; this.inputs = inputs; this.outputs = outputs; - this.rootOperators = rootOperators; } public ParseContext getParseContext() { @@ -84,10 +74,6 @@ public HiveConf getConf() { return visitedReduceSinks; } - public Deque<Operator<? extends OperatorDesc>> getRootOperators() { - return rootOperators; - } - public Map<MapJoinOperator, Long> getMjOpSizes() { return mjOpSizes; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java index 19aae70..c531a5b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.exec.ConditionalTask; import org.apache.hadoop.hive.ql.exec.DummyStoreOperator; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -44,12 +45,14 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.UnionOperator; import org.apache.hadoop.hive.ql.exec.spark.SparkTask; +import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.CompositeProcessor; import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; import
org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.ForwardWalker; import org.apache.hadoop.hive.ql.lib.GraphWalker; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; @@ -59,6 +62,10 @@ import org.apache.hadoop.hive.ql.lib.TypeRule; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.optimizer.ConstantPropagate; +import org.apache.hadoop.hive.ql.optimizer.SparkDynamicPartitionPruningOptimization; +import org.apache.hadoop.hive.ql.optimizer.SparkRemoveDynamicPruningBySize; +import org.apache.hadoop.hive.ql.optimizer.metainfo.annotation.AnnotateWithOpTraits; import org.apache.hadoop.hive.ql.optimizer.physical.MetadataOnlyOptimizer; import org.apache.hadoop.hive.ql.optimizer.physical.NullScanOptimizer; import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext; @@ -73,6 +80,7 @@ import org.apache.hadoop.hive.ql.optimizer.spark.SparkSkewJoinResolver; import org.apache.hadoop.hive.ql.optimizer.spark.SparkSortMergeJoinFactory; import org.apache.hadoop.hive.ql.optimizer.spark.SplitSparkWorkResolver; +import org.apache.hadoop.hive.ql.optimizer.stats.annotation.AnnotateWithStatistics; import org.apache.hadoop.hive.ql.parse.GlobalLimitCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -101,22 +109,70 @@ public SparkCompiler() { protected void optimizeOperatorPlan(ParseContext pCtx, Set inputs, Set outputs) throws SemanticException { PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE); - // Sequence of TableScan operators to be walked - Deque> deque = new LinkedList>(); - deque.addAll(pCtx.getTopOps().values()); - OptimizeSparkProcContext procCtx = new OptimizeSparkProcContext(conf, pCtx, inputs, outputs, deque); - // create a walker which walks the tree in a DFS manner while maintaining - // the operator stack. 
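A note on the restructuring in this hunk: optimizeOperatorPlan now runs three explicit phases (dynamic partition pruning, statistics annotation, join optimizations), all built on the same rule-dispatcher pattern, where operator-name regexes are mapped to NodeProcessors and fired by a graph walker. A stripped-down, JDK-only sketch of that dispatch idea, with a hypothetical Processor type rather than Hive's interfaces:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class MiniDispatcher {
  interface Processor { void process(String node); }

  public static void main(String[] args) {
    // Rules map an operator-name pattern to a processor, like opRules below.
    Map<Pattern, Processor> opRules = new LinkedHashMap<Pattern, Processor>();
    opRules.put(Pattern.compile("FIL.*"), new Processor() {
      public void process(String node) {
        System.out.println("dynamic partition pruning candidate: " + node);
      }
    });

    // The walker visits each node and fires the matching rule's processor.
    for (String node : Arrays.asList("TS[0]", "FIL[1]", "RS[2]")) {
      for (Map.Entry<Pattern, Processor> rule : opRules.entrySet()) {
        if (rule.getKey().matcher(node).matches()) {
          rule.getValue().process(node);
        }
      }
    }
  }
}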
+ OptimizeSparkProcContext procCtx = new OptimizeSparkProcContext(conf, pCtx, inputs, outputs); + + // Run Spark Dynamic Partition Pruning + runDynamicPartitionPruning(procCtx); + + // Annotate OP tree with statistics + runStatsAnnotation(procCtx); + + // Run Join related optimizations + runJoinOptimizations(procCtx); + + PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE); + } + + private void runStatsAnnotation(OptimizeSparkProcContext procCtx) throws SemanticException { + new AnnotateWithStatistics().transform(procCtx.getParseContext()); + new AnnotateWithOpTraits().transform(procCtx.getParseContext()); + } + + private void runDynamicPartitionPruning(OptimizeSparkProcContext procCtx) + throws SemanticException { + if (!conf.getBoolVar(HiveConf.ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING)) { + return; + } + + ParseContext parseContext = procCtx.getParseContext(); + Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); + opRules.put( + new RuleRegExp("Dynamic Partition Pruning", + FilterOperator.getOperatorName() + "%"), + new SparkDynamicPartitionPruningOptimization()); + + // The dispatcher fires the processor corresponding to the closest matching + // rule and passes the context along + Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); + GraphWalker ogw = new ForwardWalker(disp); + + List<Node> topNodes = new ArrayList<Node>(); + topNodes.addAll(parseContext.getTopOps().values()); + ogw.startWalking(topNodes, null); + + // need a new run of the constant folding because we might have created lots + // of "and true and true" conditions. + if (procCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + new ConstantPropagate().transform(parseContext); + } + } + + private void runJoinOptimizations(OptimizeSparkProcContext procCtx) throws SemanticException { + ParseContext pCtx = procCtx.getParseContext(); Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); opRules.put(new RuleRegExp("Set parallelism - ReduceSink", - ReduceSinkOperator.getOperatorName() + "%"), - new SetSparkReducerParallelism()); + ReduceSinkOperator.getOperatorName() + "%"), + new SetSparkReducerParallelism()); opRules.put(new TypeRule(JoinOperator.class), new SparkJoinOptimizer(pCtx)); opRules.put(new TypeRule(MapJoinOperator.class), new SparkJoinHintOptimizer(pCtx)); + opRules.put(new RuleRegExp("Disabling Dynamic Partition Pruning By Size", + SparkPartitionPruningSinkOperator.getOperatorName() + "%"), + new SparkRemoveDynamicPruningBySize()); + // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); @@ -126,7 +182,6 @@ protected void optimizeOperatorPlan(ParseContext pCtx, Set<ReadEntity> inputs, ArrayList<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pCtx.getTopOps().values()); ogw.startWalking(topNodes, null); - PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE); } /** @@ -137,20 +192,86 @@ protected void generateTaskTree(List<Task<? extends Serializable>> rootTasks, Pa List<Task<MoveWork>> mvTask, Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException { PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE); - GenSparkUtils.getUtils().resetSequenceNumber(); - ParseContext tempParseContext = getParseContext(pCtx, rootTasks); - GenSparkWork genSparkWork = new GenSparkWork(GenSparkUtils.getUtils()); + GenSparkUtils utils = GenSparkUtils.getUtils(); + utils.resetSequenceNumber(); + ParseContext tempParseContext = getParseContext(pCtx, rootTasks); GenSparkProcContext procCtx = new
+ + private void runJoinOptimizations(OptimizeSparkProcContext procCtx) throws SemanticException { + ParseContext pCtx = procCtx.getParseContext(); Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); opRules.put(new RuleRegExp("Set parallelism - ReduceSink", - ReduceSinkOperator.getOperatorName() + "%"), - new SetSparkReducerParallelism()); + ReduceSinkOperator.getOperatorName() + "%"), + new SetSparkReducerParallelism()); opRules.put(new TypeRule(JoinOperator.class), new SparkJoinOptimizer(pCtx)); opRules.put(new TypeRule(MapJoinOperator.class), new SparkJoinHintOptimizer(pCtx)); + opRules.put(new RuleRegExp("Disabling Dynamic Partition Pruning By Size", + SparkPartitionPruningSinkOperator.getOperatorName() + "%"), + new SparkRemoveDynamicPruningBySize()); + + // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); @@ -126,7 +182,6 @@ protected void optimizeOperatorPlan(ParseContext pCtx, Set<ReadEntity> inputs, ArrayList<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pCtx.getTopOps().values()); ogw.startWalking(topNodes, null); - PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_OPTIMIZE_OPERATOR_TREE); } /** @@ -137,20 +192,86 @@ protected void generateTaskTree(List<Task<? extends Serializable>> rootTasks, Pa List<Task<MoveWork>> mvTask, Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException { PERF_LOGGER.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE); - GenSparkUtils.getUtils().resetSequenceNumber(); - ParseContext tempParseContext = getParseContext(pCtx, rootTasks); - GenSparkWork genSparkWork = new GenSparkWork(GenSparkUtils.getUtils()); + GenSparkUtils utils = GenSparkUtils.getUtils(); + utils.resetSequenceNumber(); + ParseContext tempParseContext = getParseContext(pCtx, rootTasks); GenSparkProcContext procCtx = new GenSparkProcContext( conf, tempParseContext, mvTask, rootTasks, inputs, outputs, pCtx.getTopOps()); + // -------------------------------- First Pass ---------------------------------- // + // Identify SparkPartitionPruningSinkOperators, and break OP tree if necessary + + Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); + opRules.put(new RuleRegExp("Clone OP tree for PartitionPruningSink", + SparkPartitionPruningSinkOperator.getOperatorName() + "%"), + new SparkPartitionPruningOptimizer()); + + Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); + GraphWalker ogw = new GenSparkWorkWalker(disp, procCtx); + + List<Node> topNodes = new ArrayList<Node>(); + topNodes.addAll(pCtx.getTopOps().values()); + ogw.startWalking(topNodes, null); + + // -------------------------------- Second Pass ---------------------------------- // + // Break OP tree, and generate work tree + topNodes.clear(); + topNodes.addAll(procCtx.topOps.values()); + generateWorkTree(procCtx, topNodes); + + if (!procCtx.clonedPruningTableScanSet.isEmpty()) { + SparkTask pruningTask = SparkUtilities.createSparkTask(conf); + SparkTask mainTask = procCtx.currentTask; + pruningTask.addDependentTask(procCtx.currentTask); + procCtx.rootTasks.remove(procCtx.currentTask); + procCtx.rootTasks.add(pruningTask); + procCtx.currentTask = pruningTask; + + topNodes.clear(); + topNodes.addAll(procCtx.clonedPruningTableScanSet); + generateWorkTree(procCtx, topNodes); + + procCtx.currentTask = mainTask; + } + + // -------------------------------- Post Pass ---------------------------------- // + + // we still need to clone some operator plans and remove union operators + for (BaseWork w : procCtx.workWithUnionOperators) { + GenSparkUtils.getUtils().removeUnionOperators(conf, procCtx, w); + } + + // we need to fill MapWork with 'local' work and bucket information for SMB Join. + GenSparkUtils.getUtils().annotateMapWork(procCtx); + + // finally make sure the file sink operators are set up right + for (FileSinkOperator fileSink : procCtx.fileSinkSet) { + GenSparkUtils.getUtils().processFileSink(procCtx, fileSink); + } + + // Process partition pruning sinks + for (Operator<?> prunerSink : procCtx.pruningSinkSet) { + utils.processPartitionPruningSink(procCtx, (SparkPartitionPruningSinkOperator) prunerSink); + } + + PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE); + } + + private void generateWorkTree(GenSparkProcContext procCtx, List<Node> topNodes) + throws SemanticException { // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. The dispatcher generates the plan from the operator tree Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); + GenSparkWork genSparkWork = new GenSparkWork(GenSparkUtils.getUtils()); + opRules.put(new RuleRegExp("Split Work - ReduceSink", ReduceSinkOperator.getOperatorName() + "%"), genSparkWork); + opRules.put(new RuleRegExp("Split Work - SparkPartitionPruningSink", + SparkPartitionPruningSinkOperator.getOperatorName() + "%"), genSparkWork); + opRules.put(new TypeRule(MapJoinOperator.class), new SparkReduceSinkMapJoinProc()); opRules.put(new RuleRegExp("Split Work + Move/Merge - FileSink", @@ -185,8 +306,10 @@ public Object process(Node n, Stack<Node> s, * SMBJoinOP * * Some of the other processors are expecting only one traversal beyond SMBJoinOp. - * We need to traverse from the big-table path only, and stop traversing on the small-table path once we reach SMBJoinOp. - * Also add some SMB join information to the context, so we can properly annotate the MapWork later on. 
+ * We need to traverse from the big-table path only, and stop traversing on the + * small-table path once we reach SMBJoinOp. + * Also add some SMB join information to the context, so we can properly annotate + * the MapWork later on. */ opRules.put(new TypeRule(SMBMapJoinOperator.class), new NodeProcessor() { @@ -218,25 +341,8 @@ public Object process(Node currNode, Stack stack, // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); - List topNodes = new ArrayList(); - topNodes.addAll(pCtx.getTopOps().values()); GraphWalker ogw = new GenSparkWorkWalker(disp, procCtx); ogw.startWalking(topNodes, null); - - // we need to clone some operator plans and remove union operators still - for (BaseWork w: procCtx.workWithUnionOperators) { - GenSparkUtils.getUtils().removeUnionOperators(conf, procCtx, w); - } - - // we need to fill MapWork with 'local' work and bucket information for SMB Join. - GenSparkUtils.getUtils().annotateMapWork(procCtx); - - // finally make sure the file sink operators are set up right - for (FileSinkOperator fileSink: procCtx.fileSinkSet) { - GenSparkUtils.getUtils().processFileSink(procCtx, fileSink); - } - - PERF_LOGGER.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_GENERATE_TASK_TREE); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningOptimizer.java new file mode 100644 index 0000000..4f187e6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningOptimizer.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse.spark; + +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.Stack; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hive.ql.exec.MapJoinOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * This processor triggers on SparkPartitionPruningSinkOperator. 
For an operator tree like + * this: + * + * Original Tree: + * TS TS + * | | + * FIL FIL + * | | \ + * RS RS SEL + * \ / | + * JOIN GBY + * | + * SPARKPRUNINGSINK + * + * It removes the branch containing SPARKPRUNINGSINK from the original operator tree, and splits + * the plan into two separate trees: + * + * Tree #1: Tree #2: + * TS TS TS + * | | | + * FIL FIL FIL + * | | | + * RS RS SEL + * \ / | + * JOIN GBY + * | + * SPARKPRUNINGSINK + * + * For MapJoinOperator, this optimizer does nothing - the pruning branch should be executed within + * the same SparkTask. + */ +public class SparkPartitionPruningOptimizer implements NodeProcessor { + @Override + public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + SparkPartitionPruningSinkOperator pruningSinkOp = (SparkPartitionPruningSinkOperator) nd; + GenSparkProcContext context = (GenSparkProcContext) procCtx; + + // Locate the op where the branch starts + // This is guaranteed to succeed since the branch always follows the pattern + // shown in the first picture above. + Operator<?> filterOp = pruningSinkOp, selOp = null; + while (filterOp != null) { + if (filterOp.getNumChild() > 1) { + break; + } else { + selOp = filterOp; + filterOp = filterOp.getParentOperators().get(0); + } + } + + // Check if this is a MapJoin. If so, do not split. + for (Operator<?> childOp : filterOp.getChildOperators()) { + if (childOp instanceof ReduceSinkOperator && + childOp.getChildOperators().get(0) instanceof MapJoinOperator) { + context.pruningSinkSet.add(pruningSinkOp); + return null; + } + } + + List<Operator<?>> roots = new LinkedList<Operator<?>>(); + collectRoots(roots, pruningSinkOp); + + List<Operator<? extends OperatorDesc>> savedChildOps = filterOp.getChildOperators(); + filterOp.setChildOperators(Utilities.makeList(selOp)); + + // Now clone the tree above selOp + List<Operator<?>> newRoots = Utilities.cloneOperatorTree(context.parseContext.getConf(), roots); + for (int i = 0; i < roots.size(); i++) { + TableScanOperator newTs = (TableScanOperator) newRoots.get(i); + TableScanOperator oldTs = (TableScanOperator) roots.get(i); + newTs.getConf().setTableMetadata(oldTs.getConf().getTableMetadata()); + } + context.clonedPruningTableScanSet.addAll(newRoots); + + // Restore broken links between operators, and remove the branch from the original tree + filterOp.setChildOperators(savedChildOps); + filterOp.removeChild(selOp); + + // Find the cloned PruningSink and add it to pruningSinkSet + Set<Operator<?>> sinkSet = new HashSet<Operator<?>>(); + for (Operator<?> root : newRoots) { + SparkUtilities.collectOp(sinkSet, root, SparkPartitionPruningSinkOperator.class); + } + Preconditions.checkArgument(sinkSet.size() == 1, + "AssertionError: expected to contain only one SparkPartitionPruningSinkOperator," + + " but found " + sinkSet.size()); + SparkPartitionPruningSinkOperator clonedPruningSinkOp = + (SparkPartitionPruningSinkOperator) sinkSet.iterator().next(); + clonedPruningSinkOp.getConf().setTableScan(pruningSinkOp.getConf().getTableScan()); + context.pruningSinkSet.add(clonedPruningSinkOp); + + return null; + } + + /** + * Recursively collect all roots (e.g., table scans) that can be reached via this op. + * @param result contains all roots that can be reached via op + * @param op the op to examine. + */ + private void collectRoots(List<Operator<?>> result, Operator<?> op) { + if (op.getNumParent() == 0) { + result.add(op); + } else { + for (Operator<?> parentOp : op.getParentOperators()) { + collectRoots(result, parentOp); + } + } + } + +}
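One thing worth noting about collectRoots above: it recurses through every parent without tracking visited operators, so on a diamond-shaped operator DAG the same root could in principle be collected more than once. A hypothetical defensive variant (illustration only, not part of this patch):

    private void collectRoots(List<Operator<?>> result, Operator<?> op,
        Set<Operator<?>> visited) {
      if (!visited.add(op)) {
        return; // already examined via another child path
      }
      if (op.getNumParent() == 0) {
        result.add(op);
      } else {
        for (Operator<?> parentOp : op.getParentOperators()) {
          collectRoots(result, parentOp, visited);
        }
      }
    }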
 diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java new file mode 100644 index 0000000..4767a0e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse.spark; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc; +import org.apache.hadoop.hive.ql.plan.api.OperatorType; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.hive.serde2.Serializer; + +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.util.Collection; +import java.util.concurrent.Future; + +/** + * This operator gets partition info from the upstream operators and writes it + * to HDFS. The data will later be read at the driver, and used for pruning + * the partitions of the big-table side. + */ +public class SparkPartitionPruningSinkOperator extends Operator<SparkPartitionPruningSinkDesc> { + + @SuppressWarnings("deprecation") + protected transient Serializer serializer; + protected transient DataOutputBuffer buffer; + protected static final Log LOG = LogFactory.getLog(SparkPartitionPruningSinkOperator.class); + + @SuppressWarnings("deprecation") + public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException { + Collection<Future<?>> result = super.initializeOp(hconf); + serializer = (Serializer) ReflectionUtils.newInstance( + conf.getTable().getDeserializerClass(), null); + buffer = new DataOutputBuffer(); + return result; + } + + @Override + public void process(Object row, int tag) throws HiveException { + ObjectInspector rowInspector = inputObjInspectors[0]; + try { + Writable writableRow = serializer.serialize(row, rowInspector); + writableRow.write(buffer); + } catch (Exception e) { + throw new HiveException(e); + } + forward(row, rowInspector); + } + + @Override + public void closeOp(boolean abort) throws HiveException { + if (!abort) { + try { + flushToFile(); + } catch (Exception e) { + throw new HiveException(e); + } + } + } + + private void flushToFile() throws IOException { + // write an intermediate file to the specified path + // the format of the path is: tmpPath/targetWorkId/sourceWorkId/randInt + Path path = new Path(conf.getPath(), String.valueOf(Utilities.randGen.nextInt())); + FileSystem fs = path.getFileSystem(this.getConfiguration()); + short numOfRepl = fs.getDefaultReplication(path); + ObjectOutputStream out = null; + + try { + out = new ObjectOutputStream(new BufferedOutputStream(fs.create(path, numOfRepl), 4096)); + out.writeUTF(conf.getTargetColumnName()); + buffer.writeTo(out); + } finally { + if (out != null) { + LOG.info("Flushed to file: " + path); + out.close(); + } + } + }
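The layout flushToFile writes is simply the UTF-encoded target column name followed by the serialized row bytes accumulated in the buffer. A minimal read-side sketch under that assumption (a hypothetical helper; the actual driver-side reader lives elsewhere in the pruning code path):

    private static byte[] readPruningFile(FileSystem fs, Path file) throws IOException {
      java.io.ObjectInputStream in =
          new java.io.ObjectInputStream(new java.io.BufferedInputStream(fs.open(file)));
      try {
        String targetColumn = in.readUTF();   // the column the pruning values apply to
        java.io.ByteArrayOutputStream rows = new java.io.ByteArrayOutputStream();
        byte[] chunk = new byte[4096];
        int n;
        while ((n = in.read(chunk)) != -1) {
          rows.write(chunk, 0, n);            // raw row bytes; decode with the table's SerDe
        }
        return rows.toByteArray();
      } finally {
        in.close();
      }
    }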
 + + @Override + public OperatorType getType() { + return OperatorType.SPARKPRUNINGSINK; + } + + @Override + public String getName() { + return getOperatorName(); + } + + public static String getOperatorName() { + return "SPARKPRUNINGSINK"; + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java index 05a5841..5e90401 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java @@ -87,6 +87,8 @@ private Path tmpHDFSPath; + private Path tmpPathForPartitionPruning; + private String inputformat; private String indexIntermediateFile; @@ -463,6 +465,14 @@ public void setTmpHDFSPath(Path tmpHDFSPath) { this.tmpHDFSPath = tmpHDFSPath; } + public Path getTmpPathForPartitionPruning() { + return this.tmpPathForPartitionPruning; + } + + public void setTmpPathForPartitionPruning(Path tmpPathForPartitionPruning) { + this.tmpPathForPartitionPruning = tmpPathForPartitionPruning; + } + public void mergingInto(MapWork mapWork) { // currently, this is sole field affecting mergee task mapWork.useBucketizedHiveInputFormat |= useBucketizedHiveInputFormat; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index aa291b9..fce3f01 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -99,7 +99,7 @@ private transient TableSample tableSample; - private transient final Table tableMetadata; + private transient Table tableMetadata; public TableScanDesc() { @@ -274,6 +274,10 @@ public Table getTableMetadata() { return tableMetadata; } + public void setTableMetadata(Table tableMetadata) { + this.tableMetadata = tableMetadata; + } + public TableSample getTableSample() { return tableSample; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java index 363e49e..32af813 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java @@ -67,8 +67,18 @@ @Override public ParseContext transform(ParseContext pctx) throws SemanticException { - if (!pctx.getConf().getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") - || !pctx.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING)) { + boolean enabled = false; + String queryEngine = pctx.getConf().getVar(ConfVars.HIVE_EXECUTION_ENGINE); + + if (queryEngine.equals("tez") + && pctx.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING)) { + enabled = true; + } else if (queryEngine.equals("spark") + && pctx.getConf().getBoolVar(ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING)) { + enabled = true; + } + + if (!enabled) { return pctx; } diff --git ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q new file mode 100644 index 0000000..8b83ef6 --- /dev/null +++ ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning.q @@ -0,0 +1,180 @@ +set hive.support.sql11.reserved.keywords=false; +set hive.optimize.ppd=true; +set hive.ppd.remove.duplicatefilters=true; +set hive.spark.dynamic.partition.pruning=true; +set hive.optimize.metadataonly=false; +set hive.optimize.index.filter=true; + +-- SORT_QUERY_RESULTS + +select distinct ds from srcpart; +select distinct hr from srcpart; + +EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds; +create table srcpart_date as select ds as ds, ds as date from srcpart group by ds; +create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr; +create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr; +create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr; + +-- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +set hive.spark.dynamic.partition.pruning=false; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +set hive.spark.dynamic.partition.pruning=true; +select count(*) from srcpart where ds = '2008-04-08'; + +-- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=false; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) 
join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=true; +select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; + +-- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=false; +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=true; +select count(*) from srcpart where ds = '2008-04-08' and hr = 11; + +-- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +set hive.spark.dynamic.partition.pruning=false; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +set hive.spark.dynamic.partition.pruning=true; +select count(*) from srcpart where ds = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=false; +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=true; +select count(*) from srcpart where hr = 11; +EXPLAIN select count(*) from srcpart join srcpart_double_hour on 
(cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11; +set hive.spark.dynamic.partition.pruning=true; +select count(*) from srcpart where cast(hr as string) = 11; + + +-- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +select count(*) from srcpart where ds = '2008-04-08'; + +-- non-equi join +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); +select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); + +-- old style join syntax +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; +select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; + +-- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; + +-- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; + +-- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); 
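-- Illustrative only (not part of the original test): in the subquery cases above the pruning
-- source is itself an aggregate, so the pruning values reach the driver from a reduce-side
-- work; the plan is still expected to show a 'Spark Partition Pruning Sink Operator'.
EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart);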
+EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); + +set hive.auto.convert.join=true; +set hive.auto.convert.join.noconditionaltask = true; +set hive.auto.convert.join.noconditionaltask.size = 10000000; + +-- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +select count(*) from srcpart where ds = '2008-04-08'; + +-- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; + +-- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart where ds = '2008-04-08' and hr = 11; + +-- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +-- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +select count(*) from srcpart where hr = 11; + +-- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +select count(*) from srcpart where ds = '2008-04-08'; + +-- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where 
srcpart_date.date = '2008-04-08'; + +-- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; + +-- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +-- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +-- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); + + +drop table srcpart_orc; +drop table srcpart_date; +drop table srcpart_hour; +drop table srcpart_date_hour; +drop table srcpart_double_hour; diff --git ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q new file mode 100644 index 0000000..734f187 --- /dev/null +++ ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_2.q @@ -0,0 +1,118 @@ +set hive.optimize.ppd=true; +set hive.ppd.remove.duplicatefilters=true; +set hive.spark.dynamic.partition.pruning=true; +set hive.optimize.metadataonly=false; +set hive.optimize.index.filter=true; +set hive.auto.convert.join=true; +set hive.auto.convert.join.noconditionaltask = true; +set hive.auto.convert.join.noconditionaltask.size = 10000000; + +-- SORT_QUERY_RESULTS + +create table dim_shops (id int, label string) row format delimited fields terminated by ',' stored as textfile; +load data local inpath '../../data/files/dim_shops.txt' into table dim_shops; + +create table agg_01 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile; +alter table agg_01 add partition (dim_shops_id = 1); +alter table agg_01 add partition (dim_shops_id = 2); +alter table agg_01 add partition (dim_shops_id = 3); + +load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01 partition (dim_shops_id=1); +load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2); +load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3); + +analyze table dim_shops compute statistics; +analyze table agg_01 partition (dim_shops_id) compute statistics; + +select * from dim_shops; +select * from agg_01; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP 
BY d1.label +ORDER BY d1.label; + +set hive.spark.dynamic.partition.pruning.max.data.size=1; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id; + +SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id; + +EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1; + +SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1; + +set hive.spark.dynamic.partition.pruning.max.data.size=1000000; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + + +EXPLAIN +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +UNION ALL +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; + +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +UNION ALL +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; diff --git ql/src/test/results/clientpositive/spark/bucket2.q.out ql/src/test/results/clientpositive/spark/bucket2.q.out index 89c3b4c..8bb53d5 100644 --- ql/src/test/results/clientpositive/spark/bucket2.q.out +++ ql/src/test/results/clientpositive/spark/bucket2.q.out @@ -203,14 +203,11 @@ STAGE PLANS: Processor Tree: TableScan alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s diff --git ql/src/test/results/clientpositive/spark/bucket3.q.out ql/src/test/results/clientpositive/spark/bucket3.q.out index 2fc4855..b25ea05 100644 --- ql/src/test/results/clientpositive/spark/bucket3.q.out +++ ql/src/test/results/clientpositive/spark/bucket3.q.out @@ -226,14 +226,11 @@ STAGE PLANS: Processor Tree: TableScan alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), '1' (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' diff --git ql/src/test/results/clientpositive/spark/bucket4.q.out ql/src/test/results/clientpositive/spark/bucket4.q.out index 
44e0f9f..2ad59da 100644 --- ql/src/test/results/clientpositive/spark/bucket4.q.out +++ ql/src/test/results/clientpositive/spark/bucket4.q.out @@ -202,14 +202,11 @@ STAGE PLANS: Processor Tree: TableScan alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s diff --git ql/src/test/results/clientpositive/spark/column_access_stats.q.out ql/src/test/results/clientpositive/spark/column_access_stats.q.out index 3e16f61..2d6fce1 100644 --- ql/src/test/results/clientpositive/spark/column_access_stats.q.out +++ ql/src/test/results/clientpositive/spark/column_access_stats.q.out @@ -92,11 +92,9 @@ STAGE PLANS: Processor Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE ListSink PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1 @@ -124,11 +122,9 @@ STAGE PLANS: Processor Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE ListSink PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1 diff --git ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out index e95d2ab..6ca0527 100644 --- ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out +++ ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out @@ -16,11 +16,9 @@ STAGE PLANS: Processor Tree: TableScan alias: srcpart - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: '2008-04-08' (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08' diff --git ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out index e38ccf8..603e6bb 100644 --- ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out +++ ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out @@ -461,16 +461,13 @@ STAGE PLANS: Processor Tree: TableScan alias: list_bucketing_static_part - Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: ((key = '484') and (value = 'val_484')) (type: boolean) - Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE 
ListSink PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' diff --git ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out index 881f41a..08879cc 100644 --- ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out +++ ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out @@ -43,16 +43,13 @@ STAGE PLANS: Processor Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: false (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE ListSink PREHOOK: query: select key from src where false diff --git ql/src/test/results/clientpositive/spark/pcr.q.out ql/src/test/results/clientpositive/spark/pcr.q.out index 4c22f0b..b6c2f07 100644 --- ql/src/test/results/clientpositive/spark/pcr.q.out +++ ql/src/test/results/clientpositive/spark/pcr.q.out @@ -5461,11 +5461,9 @@ STAGE PLANS: Processor Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: explain select key,value from srcpart where hr = cast(11 as double) @@ -5482,11 +5480,9 @@ STAGE PLANS: Processor Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: explain select key,value from srcpart where cast(hr as double) = 11 @@ -5503,10 +5499,8 @@ STAGE PLANS: Processor Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE ListSink diff --git ql/src/test/results/clientpositive/spark/sample3.q.out ql/src/test/results/clientpositive/spark/sample3.q.out index 2fe6b0d..35a4352 100644 --- ql/src/test/results/clientpositive/spark/sample3.q.out +++ ql/src/test/results/clientpositive/spark/sample3.q.out @@ -22,14 +22,11 @@ STAGE PLANS: Processor Tree: TableScan alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((hash(key) & 2147483647) % 5) = 0) (type: boolean) - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: SELECT s.key diff --git ql/src/test/results/clientpositive/spark/sample9.q.out ql/src/test/results/clientpositive/spark/sample9.q.out index c9823f7..1a84bd6 100644 --- ql/src/test/results/clientpositive/spark/sample9.q.out +++ ql/src/test/results/clientpositive/spark/sample9.q.out @@ -53,17 +53,14 
@@ STAGE PLANS: Processor Tree: TableScan alias: a - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) sampleDesc: BUCKET 1 OUT OF 2 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: SELECT s.* diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out index c3f996f..b762c72 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out @@ -1911,17 +1911,14 @@ STAGE PLANS: Processor Tree: TableScan alias: test_table1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean) sampleDesc: BUCKET 2 OUT OF 16 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) @@ -2003,17 +2000,14 @@ STAGE PLANS: Processor Tree: TableScan alias: test_table3 - Statistics: Num rows: 1028 Data size: 10968 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean) sampleDesc: BUCKET 2 OUT OF 16 - Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string), ds (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) diff --git ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out new file mode 100644 index 0000000..e373bc2 --- /dev/null +++ ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out @@ -0,0 +1,5457 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +select distinct ds from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: -- SORT_QUERY_RESULTS + +select distinct ds from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-09 +PREHOOK: query: select distinct hr from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select distinct hr from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +11 +12 +PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-3 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart_date + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-3 + Create Table Operator: + Create Table + columns: ds string, date string + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.srcpart_date + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: 
Output: default@srcpart_date +POSTHOOK: query: create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_date +PREHOOK: query: create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart_hour +POSTHOOK: query: create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_hour +PREHOOK: query: create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart_date_hour +POSTHOOK: query: create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_date_hour +PREHOOK: query: create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart_double_hour +POSTHOOK: query: create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_double_hour +PREHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: 
type: QUERY +POSTHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: 
Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) 
from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 7 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Map 8 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: hr + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 5 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Map 6 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2) + Reducer 4 <- Reducer 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator 
Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + value expressions: hr (type: string) + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: string) + sort order: + + Map-reduce partition columns: hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 hr (type: string) + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 5 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Map 6 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2) + Reducer 4 <- Reducer 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds is not null and hr is not null) (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + value expressions: hr (type: string) + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: string) + sort order: + + Map-reduce partition columns: hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col3 + Statistics: Num 
rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 hr (type: string) + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds 
= srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: hr + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string), hr (type: string) + 1 ds (type: string), hr (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: 
mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds is not null and hr is not null) (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: 
string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string), hr (type: string) + 1 ds (type: string), hr (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +POSTHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + 
alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +0 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern 
was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +0 +PREHOOK: query: select count(*) from srcpart where ds = 'I DONT EXIST' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = 'I DONT EXIST' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +#### A masked pattern was here #### +0 +PREHOOK: query: -- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Select Operator + expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: UDFToDouble(hr) + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(hr) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(hr) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 0 Data size: 0 
Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 UDFToDouble(hr) (type: double) + 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Select Operator + expressions: hr (type: double) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: (hr * 2) + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here 
#### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: (hr * 2) (type: double) + sort order: + + Map-reduce partition columns: (hr * 2) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: double) + sort order: + + Map-reduce partition columns: hr (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 (hr * 2) (type: double) + 1 hr (type: double) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: 
QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(hr) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(hr) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 UDFToDouble(hr) (type: double) + 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### 
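[The plans in this part of the golden file cover expression-valued pruning keys: when the join key is an expression over the partition column (cast(srcpart_double_hour.hr/2 as int), srcpart.hr*2, or a cast to string), the pruning-enabled plan records the wrapped expression as "partition key expr" (UDFToDouble(hr), (hr * 2), UDFToString((hr * 2))), so the collected values are matched against the expression applied to each partition's hr rather than against the raw partition value. Each scenario appears twice: first with a root Stage-2 whose small-table scan ends in a Spark Partition Pruning Sink Operator, then recompiled without that stage — the second EXPLAIN shows only the single-stage shuffle join, i.e. the optimization turned off. A minimal sketch of how such a pair would be produced, assuming the srcpart/srcpart_double_hour tables created earlier in this test and the configuration property this patch introduces:

    set hive.spark.dynamic.partition.pruning=true;
    EXPLAIN
    select count(*) from srcpart
    join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int))
    where srcpart_double_hour.hour = 11;
    -- expect a root Stage-2: the srcpart_double_hour scan feeds a
    -- Spark Partition Pruning Sink Operator (partition key expr: UDFToDouble(hr),
    -- target work: Map 1) before the Stage-1 join runs

    set hive.spark.dynamic.partition.pruning=false;
    -- the same EXPLAIN now yields only Stage-1 with the shuffle join,
    -- as in the plan that follows]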
+POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: (hr * 2) (type: double) + sort order: + + Map-reduce partition columns: (hr * 2) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: double) + sort order: + + Map-reduce partition columns: hr (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 (hr * 2) (type: double) + 1 hr (type: double) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) 
from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean) + Filter Operator + predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean) + Select Operator + expressions: UDFToString(hr) (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: UDFToString((hr * 2)) + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToString((hr * 2)) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToString((hr * 2)) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToString((hr * 2)) (type: string) + sort order: + + Map-reduce partition columns: UDFToString((hr * 2)) (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + 
TableScan + alias: srcpart_double_hour + filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToString(hr) (type: string) + sort order: + + Map-reduce partition columns: UDFToString(hr) (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 UDFToString((hr * 2)) (type: string) + 1 UDFToString(hr) (type: string) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where cast(hr as string) = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where cast(hr as string) = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date 
from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 7 <- Map 6 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds = '2008-04-08') (type: boolean) + Select Operator + expressions: '2008-04-08' (type: string) + outputColumnNames: ds + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Map 8 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Reducer 7 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 5 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 3 <- Map 5 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2) + Reducer 4 <- Reducer 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds = '2008-04-08') (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '2008-04-08' (type: string) + outputColumnNames: ds + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + 
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Work 'Reducer 2' is a cross product +PREHOOK: query: -- non-equi join +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +PREHOOK: type: QUERY +POSTHOOK: query: -- non-equi join +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 
(PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + value expressions: ds (type: string), hr (type: string) + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: ((date = '2008-04-08') and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((date = '2008-04-08') and (hour = 11)) (type: boolean) + Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE + value expressions: ds (type: string), hr (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 + 1 + outputColumnNames: _col2, _col3, _col7, _col9 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col2 = _col7) or (_col3 = _col9)) (type: boolean) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Work 'Reducer 2' is a cross product +PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +1500 +PREHOOK: query: -- old style join syntax +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +PREHOOK: type: QUERY +POSTHOOK: query: -- old style join syntax +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: hr + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner 
Join 0 to 1 + keys: + 0 ds (type: string), hr (type: string) + 1 ds (type: string), hr (type: string) + outputColumnNames: _col2, _col3, _col7, _col9 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col2 = _col7) and (_col3 = _col9)) (type: boolean) + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + 
Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col8 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (date = '2008-04-08') (type: boolean) + Filter Operator + predicate: (date = '2008-04-08') (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 4 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key 
expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Outer Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col8 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE 
Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 7 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Map 8 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Filter Operator + predicate: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Select Operator + expressions: '11' (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: hr + target column name: hr + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 5 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Map 6 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2) + Reducer 4 <- Reducer 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr = 11) (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '11' (type: string) + sort order: + + Map-reduce partition columns: '11' (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '11' (type: string) + sort order: + + Map-reduce partition columns: '11' (type: string) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 hr (type: string) + Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and 
srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 5 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Map 6 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2) + Reducer 4 <- Reducer 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr = 13) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr = 13) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: '13' (type: string) + sort order: + + Map-reduce partition columns: '13' (type: string) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '13' (type: string) + sort order: + + Map-reduce partition columns: '13' (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 hr (type: string) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: 
false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +0 +PREHOOK: query: -- union + subquery +EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: -- union + subquery +EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 11 <- Map 10 (GROUP, 1) + Reducer 9 <- Map 8 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 10 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Map 8 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Reducer 11 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Reducer 9 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target 
work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 5 <- Map 4 (GROUP, 1) + Reducer 7 <- Map 6 (GROUP, 1) + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2), Reducer 7 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Reducer 7 + Reduce Operator 
Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2000 +PREHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 11 <- Map 10 (GROUP, 1) + Reducer 9 <- Map 8 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 10 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Map 8 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Reducer 11 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Reducer 9 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 
(type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 5 <- Map 4 (GROUP, 1) + Reducer 7 <- Map 6 (GROUP, 1) + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2), Reducer 7 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + outputColumnNames: _col2 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col2 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + 
outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Reducer 7 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-09 +PREHOOK: query: EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 11 <- Map 10 (GROUP, 1) + Reducer 13 <- Map 12 (GROUP, 1) + Reducer 15 <- Map 14 (GROUP, 1) + Reducer 17 <- Map 16 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 10 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Map 12 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Map 14 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Map 16 + Map Operator Tree: + 
TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Reducer 11 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Reducer 13 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Reducer 15 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 4 + Reducer 17 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 4 + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 5 <- Map 4 (GROUP, 2) + Reducer 7 <- Map 6 (GROUP, 1) + Reducer 9 <- Map 8 (GROUP, 1) + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2), Reducer 7 (PARTITION-LEVEL SORT, 2), Reducer 9 (PARTITION-LEVEL SORT, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + 
TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 8 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Reducer 3 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Reducer 7 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Reducer 9 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: 
_col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-08 +2008-04-09 +2008-04-09 +PREHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: ds + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 
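
On the consuming side of the single-column, single-key plan above, the collected values are read back before the target work (Map 1) computes its splits, and partitions whose ds value cannot match are dropped. A sketch of that read-and-filter step, again with hypothetical names:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class PartitionPrunerSketch {
        // Keep only the partitions whose ds value was emitted by the sink.
        static List<String> prune(List<String> partitionDsValues, Path valuesFile)
                throws IOException {
            Set<String> allowed = new HashSet<>(Files.readAllLines(valuesFile));
            return partitionDsValues.stream()
                    .filter(allowed::contains)
                    .collect(Collectors.toList());
        }
    }
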
Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator 
+ predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: ds + target work: Map 1 + Local Work: + Map Reduce Local Work + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 _col3 (type: string) + 1 hr (type: string) + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: hr + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col3 + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col3 (type: string) + 1 hr (type: string) + input vertices: + 1 Map 4 + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join 
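
In the multiple-sources plan above, two independent dimension scans (srcpart_date and srcpart_hour) each feed a pruning sink targeting the same Map 1, one per partition column. A partition survives only if it passes every per-column filter, i.e. the filters combine by intersection. A sketch, assuming partitions are represented as column-to-value maps:

    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class MultiSourcePruneSketch {
        // columnFilters: e.g. {"ds" -> {"2008-04-08"}, "hr" -> {"11"}}, collected
        // from two different sinks. A partition spec must satisfy every filter.
        static List<Map<String, String>> prune(List<Map<String, String>> partitions,
                                               Map<String, Set<String>> columnFilters) {
            return partitions.stream()
                    .filter(spec -> columnFilters.entrySet().stream()
                            .allMatch(f -> {
                                String v = spec.get(f.getKey());
                                return v != null && f.getValue().contains(v);
                            }))
                    .collect(Collectors.toList());
        }
    }
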
srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string), hr (type: string) + 1 ds (type: string), hr (type: string) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: ds + target work: Map 1 + Select Operator + expressions: hr (type: string) + 
outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: hr + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string), hr (type: string) + 1 ds (type: string), hr (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: 
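
Here a single source (srcpart_date_hour) emits two sinks, one for ds and one for hr. Applying them requires splitting a partition spec such as ds=2008-04-08/hr=11 (the format visible in the PREHOOK/POSTHOOK input lines) into its per-column values. A small sketch of that parsing step:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class PartitionSpecSketch {
        // "ds=2008-04-08/hr=11" -> {ds=2008-04-08, hr=11}
        static Map<String, String> parse(String spec) {
            Map<String, String> out = new LinkedHashMap<>();
            for (String part : spec.split("/")) {
                int eq = part.indexOf('=');
                out.put(part.substring(0, eq), part.substring(eq + 1));
            }
            return out;
        }

        public static void main(String[] args) {
            System.out.println(parse("ds=2008-04-08/hr=11")); // {ds=2008-04-08, hr=11}
        }
    }
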
default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +POSTHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: ds + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: 
type: QUERY +POSTHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 UDFToDouble(hr) (type: double) + 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Select Operator + expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: UDFToDouble(hr) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: hr + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 UDFToDouble(hr) (type: double) + 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double) + input vertices: + 1 Map 3 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join 
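
When the join key is an expression over the partition column, the plan records it as "partition key expr: UDFToDouble(hr)": the raw partition value must be run through the same expression before the membership test, so that both sides of the comparison agree. A sketch with the expression abstracted as a plain function (hypothetical helper, not Hive's expression machinery):

    import java.util.List;
    import java.util.Set;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    final class ExpressionPruneSketch {
        // Transform each partition value with the key expression, then test
        // membership against the values collected from the small side.
        static List<String> prune(List<String> hrValues,
                                  Function<String, Double> keyExpr,
                                  Set<Double> allowed) {
            return hrValues.stream()
                    .filter(hr -> allowed.contains(keyExpr.apply(hr)))
                    .collect(Collectors.toList());
        }

        public static void main(String[] args) {
            // The small side emits doubles, so the string partition value 'hr'
            // is widened before comparing, mirroring UDFToDouble(hr) above.
            System.out.println(prune(List.of("11", "12"),
                    Double::parseDouble, Set.of(11.0))); // [11]
        }
    }
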
srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 (hr * 2) (type: double) + 1 hr (type: double) + Select Operator + expressions: hr (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: (hr * 2) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: hr + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 (hr * 2) (type: double) + 1 hr (type: double) + input vertices: + 1 Map 3 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + 
value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-1 depends on stages: Stage-3 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + + Stage: Stage-3 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds = '2008-04-08') (type: boolean) + Statistics: Num rows: 
1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '2008-04-08' (type: string) + outputColumnNames: ds + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Local Work: + Map Reduce Local Work + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: ds + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + target column name: ds + target work: Map 3 + + Stage: Stage-1 + Spark + Edges: + Reducer 4 <- Map 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + input vertices: + 1 Reducer 2 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join 
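
Because the pruning sink in this plan hangs off a reducer (Reducer 2) rather than a map-side scan, the plan is split into Stage-2, Stage-3, and Stage-1 so that the value file is fully written before the pruned scan (Map 3) plans its input. The toy reachability check below illustrates that ordering invariant; it is an illustration of the constraint suggested by the stage dependencies above, not Hive's actual planner code.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    final class StageOrderSketch {
        // deps: stage -> stages it depends on. The producer of the pruning
        // values must be reachable from the consumer through its dependencies.
        static boolean runsBefore(Map<String, Set<String>> deps,
                                  String producer, String consumer) {
            return reaches(deps, consumer, producer, new HashSet<>());
        }

        private static boolean reaches(Map<String, Set<String>> deps,
                                       String from, String to, Set<String> seen) {
            if (from.equals(to)) return true;
            if (!seen.add(from)) return false;
            for (String d : deps.getOrDefault(from, Set.of())) {
                if (reaches(deps, d, to, seen)) return true;
            }
            return false;
        }

        public static void main(String[] args) {
            Map<String, Set<String>> deps = new HashMap<>();
            deps.put("Stage-3", Set.of("Stage-2"));
            deps.put("Stage-1", Set.of("Stage-3"));
            System.out.println(runsBefore(deps, "Stage-3", "Stage-1")); // true
        }
    }
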
(select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col8 + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 
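
The left-join plan above generates no pruning sink because srcpart sits on the preserved side: a partition with no match would still emit a row (padded with nulls), so dropping it would change the result. The next plan, where srcpart is on the non-preserved side, does prune it, and the full-outer plan after that prunes nothing. A sketch of the eligibility rule these three plans suggest (hypothetical helper, not Hive's API):

    final class OuterJoinPruneSketch {
        enum JoinType { INNER, LEFT_OUTER, RIGHT_OUTER, FULL_OUTER }

        // Pruning the partitioned table is only safe when a non-matching
        // partition could never contribute a row to the join output.
        static boolean canPrune(JoinType type, boolean partitionedTableIsPreservedSide) {
            if (type == JoinType.INNER) return true;
            if (type == JoinType.FULL_OUTER) return false;
            return !partitionedTableIsPreservedSide;
        }
    }
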
-1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-1 depends on stages: Stage-3 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (date = '2008-04-08') (type: boolean) + Filter Operator + predicate: (date = '2008-04-08') (type: boolean) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 3 + + Stage: Stage-3 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 
depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 3 <- Reducer 2 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Outer Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col8 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: 
boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: ds + target work: Map 1 + Local Work: + Map Reduce Local Work + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 '11' (type: string) + 1 '11' (type: string) + Select Operator + expressions: '11' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + target column name: hr + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr = 11) (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 '11' (type: string) + 1 '11' (type: string) + input vertices: + 1 Map 4 + Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on 
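
With static pruning in play (srcpart.hr = 11), the compiler folds the hr join key to the constant '11', so the effective partition filter behaves like the intersection of the static and dynamic value sets; in the degenerate hr = 13 case that follows, the static side matches no partition and the srcpart scan effectively disappears from the plan. A sketch of that combination (an illustration of the observed behavior, not Hive's code):

    import java.util.HashSet;
    import java.util.Set;

    final class StaticPlusDynamicSketch {
        // The final set of admissible partition values is what both the
        // static predicate and the dynamic pruning values allow.
        static Set<String> effectiveFilter(Set<String> staticValues,
                                           Set<String> dynamicValues) {
            Set<String> out = new HashSet<>(staticValues);
            out.retainAll(dynamicValues);
            return out;
        }

        public static void main(String[] args) {
            System.out.println(effectiveFilter(Set.of("11"), Set.of("11", "12"))); // [11]
            // hr = 13 admits no partition statically, so the dynamic values
            // are irrelevant -- consistent with the degenerate plan below.
            System.out.println(effectiveFilter(Set.of(), Set.of("11"))); // []
        }
    }
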
(srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-3 is a root stage + Stage-2 depends on stages: Stage-3 + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-3 + Spark +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 ds (type: string) + 1 ds (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 4 <- Map 3 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr = 13) (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr = 13) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 '13' (type: string) + 1 '13' (type: string) + input vertices: + 0 Map 1 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Local Work: + Map Reduce Local Work + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +-- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +-- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 11 <- Map 10 (GROUP, 1) + Reducer 9 <- Map 8 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 10 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Map 8 + Map Operator Tree: + TableScan + alias: srcpart + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: string) + Reducer 11 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + Reducer 9 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Spark Partition Pruning Sink Operator + partition key expr: ds + target column name: ds + target work: Map 1 + + Stage: Stage-1 + Spark + Edges: + Reducer 5 <- Map 4 (GROUP, 1) + Reducer 7 <- Map 6 (GROUP, 1) + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2), Reducer 7 (PARTITION-LEVEL SORT, 
2) + Reducer 3 <- Reducer 2 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + outputColumnNames: _col2 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col2 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Reducer 7 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter 
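
In the union + subquery plan above, both branches (max(ds) and min(ds)) end in their own pruning sink targeting Map 1, so at prune time there are several value files for the same target column and the pruner must take the union of their contents. A sketch reading every file under a per-column directory (hypothetical layout; a local directory stands in for the real temp location):

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.HashSet;
    import java.util.Set;

    final class UnionSourceSketch {
        // Each sink wrote its own value file for the same column; a partition
        // value matching any of them must be retained, hence the union.
        static Set<String> readAllValues(Path columnDir) throws IOException {
            Set<String> values = new HashSet<>();
            try (DirectoryStream<Path> files = Files.newDirectoryStream(columnDir)) {
                for (Path f : files) {
                    values.addAll(Files.readAllLines(f));
                }
            }
            return values;
        }
    }
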
Operator + predicate: _col0 is not null (type: boolean) + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-09 +PREHOOK: query: drop table srcpart_orc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table srcpart_orc +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table srcpart_date +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_date +PREHOOK: Output: default@srcpart_date +POSTHOOK: query: drop table srcpart_date +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Output: default@srcpart_date +PREHOOK: query: drop table srcpart_hour +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_hour +PREHOOK: Output: default@srcpart_hour +POSTHOOK: query: drop table srcpart_hour +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_hour +POSTHOOK: Output: default@srcpart_hour +PREHOOK: query: drop table srcpart_date_hour +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_date_hour +PREHOOK: Output: default@srcpart_date_hour +POSTHOOK: query: drop table srcpart_date_hour +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_date_hour +POSTHOOK: Output: default@srcpart_date_hour +PREHOOK: query: drop table srcpart_double_hour +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_double_hour +PREHOOK: Output: default@srcpart_double_hour +POSTHOOK: query: drop table srcpart_double_hour +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_double_hour +POSTHOOK: Output: default@srcpart_double_hour diff --git ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_2.q.out ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_2.q.out new file mode 100644 index 0000000..a99543c --- /dev/null +++ ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_2.q.out @@ -0,0 +1,1005 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +create table dim_shops (id int, label string) row format delimited fields terminated by ',' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dim_shops +POSTHOOK: query: -- SORT_QUERY_RESULTS + +create table dim_shops (id int, label string) row format delimited fields terminated by ',' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dim_shops +PREHOOK: query: load data local 
inpath '../../data/files/dim_shops.txt' into table dim_shops +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@dim_shops +POSTHOOK: query: load data local inpath '../../data/files/dim_shops.txt' into table dim_shops +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@dim_shops +PREHOOK: query: create table agg_01 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@agg_01 +POSTHOOK: query: create table agg_01 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@agg_01 +PREHOOK: query: alter table agg_01 add partition (dim_shops_id = 1) +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@agg_01 +POSTHOOK: query: alter table agg_01 add partition (dim_shops_id = 1) +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@agg_01 +POSTHOOK: Output: default@agg_01@dim_shops_id=1 +PREHOOK: query: alter table agg_01 add partition (dim_shops_id = 2) +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@agg_01 +POSTHOOK: query: alter table agg_01 add partition (dim_shops_id = 2) +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@agg_01 +POSTHOOK: Output: default@agg_01@dim_shops_id=2 +PREHOOK: query: alter table agg_01 add partition (dim_shops_id = 3) +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@agg_01 +POSTHOOK: query: alter table agg_01 add partition (dim_shops_id = 3) +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@agg_01 +POSTHOOK: Output: default@agg_01@dim_shops_id=3 +PREHOOK: query: load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01 partition (dim_shops_id=1) +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@agg_01@dim_shops_id=1 +POSTHOOK: query: load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01 partition (dim_shops_id=1) +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@agg_01@dim_shops_id=1 +PREHOOK: query: load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2) +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@agg_01@dim_shops_id=2 +POSTHOOK: query: load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2) +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@agg_01@dim_shops_id=2 +PREHOOK: query: load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3) +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@agg_01@dim_shops_id=3 +POSTHOOK: query: load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3) +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@agg_01@dim_shops_id=3 +PREHOOK: query: analyze table dim_shops compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@dim_shops +PREHOOK: Output: default@dim_shops +POSTHOOK: query: analyze table dim_shops compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dim_shops +POSTHOOK: Output: default@dim_shops +PREHOOK: query: analyze table agg_01 partition 
(dim_shops_id) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Output: default@agg_01 +PREHOOK: Output: default@agg_01@dim_shops_id=1 +PREHOOK: Output: default@agg_01@dim_shops_id=2 +PREHOOK: Output: default@agg_01@dim_shops_id=3 +POSTHOOK: query: analyze table agg_01 partition (dim_shops_id) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Output: default@agg_01 +POSTHOOK: Output: default@agg_01@dim_shops_id=1 +POSTHOOK: Output: default@agg_01@dim_shops_id=2 +POSTHOOK: Output: default@agg_01@dim_shops_id=3 +PREHOOK: query: select * from dim_shops +PREHOOK: type: QUERY +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: select * from dim_shops +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +1 foo +2 bar +3 baz +PREHOOK: query: select * from agg_01 +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +#### A masked pattern was here #### +POSTHOOK: query: select * from agg_01 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +#### A masked pattern was here #### +1 1 +2 1 +3 1 +4 2 +5 2 +6 2 +7 3 +8 3 +9 3 +PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + Select Operator + expressions: id (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: dim_shops_id + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + target column name: dim_shops_id + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 3 <- Reducer 2 
(SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: agg + filterExpr: dim_shops_id is not null (type: boolean) + Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 4 + Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: string), _col0 (type: decimal(10,0)) + outputColumnNames: _col6, _col0 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + keys: _col6 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0)) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked 
pattern was here #### +bar 3 15 +foo 3 6 +PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 3 <- Reducer 2 (SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: agg + filterExpr: dim_shops_id is not null (type: boolean) + Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 4 + Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: string), _col0 (type: decimal(10,0)) + outputColumnNames: _col6, _col0 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + keys: _col6 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0)) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File 
Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +bar 3 15 +foo 3 6 +PREHOOK: query: EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: id is not null (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: id is not null (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: agg + filterExpr: dim_shops_id is not null (type: boolean) + Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col1 = _col5) (type: boolean) + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: 
query: SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +bar +bar +bar +baz +baz +baz +foo +foo +foo +PREHOOK: query: EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: (id = 1) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 1 (type: int) + 1 1 (type: int) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: agg + filterExpr: (dim_shops_id = 1) (type: boolean) + Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 1 (type: int) + 1 1 (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 2 + Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +1 +2 +3 +PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), 
sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-1 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + Select Operator + expressions: id (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: dim_shops_id + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + target column name: dim_shops_id + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 3 <- Reducer 2 (SORT, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: agg + filterExpr: dim_shops_id is not null (type: boolean) + Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 4 + Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: string), _col0 (type: decimal(10,0)) + outputColumnNames: _col6, _col0 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + keys: _col6 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: 
KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0)) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +bar 3 15 +foo 3 6 +PREHOOK: query: EXPLAIN +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +UNION ALL +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +UNION ALL +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-1 depends on stages: Stage-3 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: dim_shops + filterExpr: (id is not null and (label = 'foo')) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id is not null and (label = 'foo')) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + Select Operator + expressions: id (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: dim_shops_id + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + target column name: dim_shops_id + target work: Map 1 + Local Work: + Map Reduce Local Work + + Stage: Stage-3 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: dim_shops + filterExpr: (id is not null and (label = 'bar')) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE 
Column stats: NONE + Filter Operator + predicate: (id is not null and (label = 'bar')) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + Select Operator + expressions: id (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Spark Partition Pruning Sink Operator + partition key expr: dim_shops_id + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + target column name: dim_shops_id + target work: Map 3 + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: agg_01 + filterExpr: dim_shops_id is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Map 2 + Filter Operator + predicate: (_col1 = _col5) (type: boolean) + Select Operator + expressions: _col0 (type: decimal(10,0)) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + Map 3 + Map Operator Tree: + TableScan + alias: agg_01 + filterExpr: dim_shops_id is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Map 4 + Filter Operator + predicate: (_col1 = _col5) (type: boolean) + Select Operator + expressions: _col0 (type: decimal(10,0)) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +UNION ALL +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar' +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' +UNION ALL +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +1 +2 +3 +4 +5 +6 diff --git ql/src/test/results/clientpositive/spark/temp_table.q.out ql/src/test/results/clientpositive/spark/temp_table.q.out index 16d663d..6505901 100644 --- 
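The plans in the two .q.out files above all stage a Spark Partition Pruning Sink Operator in a root stage (Stage-2) whose "target work: Map 1" names the fact-side scan to be pruned. A minimal sketch of that contract, using illustrative types rather than Hive's actual classes: the dim side collects the distinct values of the partition key expression (in Hive these travel through a temporary file), and the target scan is then planned against only the partitions whose key survived.

#include <set>
#include <string>
#include <vector>

struct Partition {
  std::string key;   // value of the partition column, e.g. ds or dim_shops_id
  std::string path;  // partition location
};

// Keys published by the dim-side stage; in Hive they are spilled to a
// temporary file by the pruning sink and read back before the target
// work is launched.
using PrunedKeys = std::set<std::string>;

std::vector<Partition> prunePartitions(const std::vector<Partition>& all,
                                       const PrunedKeys& allowed) {
  std::vector<Partition> kept;
  for (const auto& p : all) {
    if (allowed.count(p.key) != 0) {
      kept.push_back(p);  // only partitions matching a dim-side key survive
    }
  }
  return kept;
}

This ordering is also why Stage-1 consistently "depends on stages: Stage-2" in these plans: the pruning stage has to finish before the pruned scan can be planned.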
ql/src/test/results/clientpositive/spark/temp_table.q.out +++ ql/src/test/results/clientpositive/spark/temp_table.q.out @@ -160,14 +160,11 @@ STAGE PLANS: Processor Tree: TableScan alias: foo - Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select * from foo limit 10 diff --git ql/src/test/results/clientpositive/spark/udf_example_add.q.out ql/src/test/results/clientpositive/spark/udf_example_add.q.out index 7916679..587c68b 100644 --- ql/src/test/results/clientpositive/spark/udf_example_add.q.out +++ ql/src/test/results/clientpositive/spark/udf_example_add.q.out @@ -34,14 +34,11 @@ STAGE PLANS: Processor Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT example_add(1, 2), diff --git ql/src/test/results/clientpositive/spark/udf_in_file.q.out ql/src/test/results/clientpositive/spark/udf_in_file.q.out index c769d1f..4f732f5 100644 --- ql/src/test/results/clientpositive/spark/udf_in_file.q.out +++ ql/src/test/results/clientpositive/spark/udf_in_file.q.out @@ -57,14 +57,11 @@ STAGE PLANS: Processor Tree: TableScan alias: value_src - Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: in_file(str_val, '../../data/files/test2.dat') (type: boolean), in_file(ch_val, '../../data/files/test2.dat') (type: boolean), in_file(vch_val, '../../data/files/test2.dat') (type: boolean), in_file(str_val_neg, '../../data/files/test2.dat') (type: boolean), in_file(ch_val_neg, '../../data/files/test2.dat') (type: boolean), in_file(vch_val_neg, '../../data/files/test2.dat') (type: boolean), in_file('303', '../../data/files/test2.dat') (type: boolean), in_file('304', '../../data/files/test2.dat') (type: boolean), in_file(UDFToString(null), '../../data/files/test2.dat') (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE Limit Number of rows: 1 - Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE ListSink PREHOOK: query: SELECT in_file(str_val, "../../data/files/test2.dat"), diff --git ql/src/test/results/clientpositive/spark/union_view.q.out ql/src/test/results/clientpositive/spark/union_view.q.out index 593ce40..a863ff5 100644 --- ql/src/test/results/clientpositive/spark/union_view.q.out +++ ql/src/test/results/clientpositive/spark/union_view.q.out @@ -54,14 +54,11 @@ STAGE PLANS: TableScan alias: src_union_1 filterExpr: ((key = 86) and (ds = '1')) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 86) (type: boolean) - Statistics: Num rows: 250 Data size: 
2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 86 (type: int), value (type: string), '1' (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink STAGE DEPENDENCIES: @@ -75,14 +72,11 @@ STAGE PLANS: TableScan alias: src_union_2 filterExpr: ((key = 86) and (ds = '2')) (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 86) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 86 (type: int), value (type: string), '2' (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink STAGE DEPENDENCIES: @@ -96,14 +90,11 @@ STAGE PLANS: TableScan alias: src_union_3 filterExpr: ((key = 86) and (ds = '3')) (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 86) (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 86 (type: int), value (type: string), '3' (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink 86 val_86 1 diff --git ql/src/test/results/clientpositive/spark/vector_elt.q.out ql/src/test/results/clientpositive/spark/vector_elt.q.out index 180ea15..d7a46e1 100644 --- ql/src/test/results/clientpositive/spark/vector_elt.q.out +++ ql/src/test/results/clientpositive/spark/vector_elt.q.out @@ -16,17 +16,13 @@ STAGE PLANS: Processor Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (ctinyint > 0) (type: boolean) - Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ((ctinyint % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((ctinyint % 2) + 1), cstring1, cint) (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) @@ -87,14 +83,11 @@ STAGE PLANS: Processor Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: void), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 - Statistics: Num rows: 12288 Data size: 8687616 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 707 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT elt(2, 'abc', 'defg'), diff --git ql/src/test/results/clientpositive/spark/vector_string_concat.q.out ql/src/test/results/clientpositive/spark/vector_string_concat.q.out index 9ec8538..8182525 100644 --- 
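The golden-file updates in this group all have the same shape: short fetch-only operator trees (TableScan, optional Filter, Select, Limit, ListSink under the Processor Tree, with no Spark Edges or Vertices) lose their per-operator Statistics: annotations. A rough sketch of the row-at-a-time pipeline such a plan describes, with illustrative signatures rather than Hive's operator classes:

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

using Row = std::vector<std::string>;

// TableScan -> Select Operator -> Limit -> ListSink, evaluated client-side:
// rows are projected one at a time and collection stops at the limit, so no
// cluster job is submitted for plans of this form.
std::vector<Row> runFetchPlan(const std::vector<Row>& table,
                              const std::function<Row(const Row&)>& select,
                              std::size_t limit) {
  std::vector<Row> sink;
  for (const Row& row : table) {
    if (sink.size() >= limit) {
      break;
    }
    sink.push_back(select(row));
  }
  return sink;
}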
ql/src/test/results/clientpositive/spark/vector_string_concat.q.out +++ ql/src/test/results/clientpositive/spark/vector_string_concat.q.out @@ -117,14 +117,11 @@ STAGE PLANS: Processor Tree: TableScan alias: over1korc - Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: s (type: string), concat(concat(' ', s), ' ') (type: string), concat(concat('|', rtrim(concat(concat(' ', s), ' '))), '|') (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: SELECT s AS `string`, diff --git ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out index bafd62f..2bc5cad 100644 --- ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out +++ ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out @@ -22,17 +22,13 @@ STAGE PLANS: Processor Tree: TableScan alias: date_decimal_test - Statistics: Num rows: 12288 Data size: 1651260 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (cint is not null and cdouble is not null) (type: boolean) - Statistics: Num rows: 3072 Data size: 412815 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdate (type: date), cdecimal (type: decimal(20,10)) outputColumnNames: _col0, _col1 - Statistics: Num rows: 3072 Data size: 412815 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 1340 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10 diff --git ql/src/test/results/clientpositive/spark/vectorization_div0.q.out ql/src/test/results/clientpositive/spark/vectorization_div0.q.out index 30d116f..af4e0a4 100644 --- ql/src/test/results/clientpositive/spark/vectorization_div0.q.out +++ ql/src/test/results/clientpositive/spark/vectorization_div0.q.out @@ -16,14 +16,11 @@ STAGE PLANS: Processor Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (cdouble / 0.0) (type: double) outputColumnNames: _col0 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 3000 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select cdouble / 0.0 from alltypesorc limit 100 diff --git ql/src/test/results/clientpositive/spark/vectorized_case.q.out ql/src/test/results/clientpositive/spark/vectorized_case.q.out index daf6ad3..449bdb8 100644 --- ql/src/test/results/clientpositive/spark/vectorized_case.q.out +++ ql/src/test/results/clientpositive/spark/vectorized_case.q.out @@ -44,14 +44,11 @@ STAGE PLANS: Processor Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((csmallint = 418) or (csmallint = 12205)) or (csmallint = 10583)) (type: boolean) - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: 
string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select diff --git ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out index 470d9a9..444b9e0 100644 --- ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out +++ ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out @@ -116,14 +116,11 @@ STAGE PLANS: Processor Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean) - Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2.0, cdouble) (type: double), power(log2(cdouble), 2.0) (type: double), power(log2(cdouble), 2.0) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40 - Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select diff --git ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out index ef98ae9..0463d31 100644 --- ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out +++ ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out @@ -56,14 +56,11 @@ STAGE PLANS: Processor Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2)) and (cstring1 like '%')) (type: boolean) - Statistics: Num rows: 1024 Data size: 31436 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), 
upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 1024 Data size: 31436 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select diff --git serde/src/gen/thrift/gen-cpp/complex_types.cpp serde/src/gen/thrift/gen-cpp/complex_types.cpp index 411e1b0..3d9e135 100644 --- serde/src/gen/thrift/gen-cpp/complex_types.cpp +++ serde/src/gen/thrift/gen-cpp/complex_types.cpp @@ -130,58 +130,71 @@ uint32_t PropValueUnion::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t PropValueUnion::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("PropValueUnion"); if (this->__isset.intValue) { + ++fcnt; xfer += oprot->writeFieldBegin("intValue", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->intValue); xfer += oprot->writeFieldEnd(); } if (this->__isset.longValue) { + ++fcnt; xfer += oprot->writeFieldBegin("longValue", ::apache::thrift::protocol::T_I64, 2); xfer += oprot->writeI64(this->longValue); xfer += oprot->writeFieldEnd(); } if (this->__isset.stringValue) { + ++fcnt; xfer += oprot->writeFieldBegin("stringValue", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->stringValue); xfer += oprot->writeFieldEnd(); } if (this->__isset.doubleValue) { + ++fcnt; xfer += oprot->writeFieldBegin("doubleValue", ::apache::thrift::protocol::T_DOUBLE, 4); xfer += oprot->writeDouble(this->doubleValue); xfer += oprot->writeFieldEnd(); } if (this->__isset.flag) { + ++fcnt; xfer += oprot->writeFieldBegin("flag", ::apache::thrift::protocol::T_BOOL, 5); xfer += oprot->writeBool(this->flag); xfer += oprot->writeFieldEnd(); } - xfer += oprot->writeFieldBegin("lString", ::apache::thrift::protocol::T_LIST, 6); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->lString.size())); - std::vector ::const_iterator _iter12; - for (_iter12 = this->lString.begin(); _iter12 != this->lString.end(); ++_iter12) + if (this->__isset.lString) { + ++fcnt; + xfer += oprot->writeFieldBegin("lString", ::apache::thrift::protocol::T_LIST, 6); { - xfer += oprot->writeString((*_iter12)); + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->lString.size())); + std::vector ::const_iterator _iter12; + for (_iter12 = this->lString.begin(); _iter12 != this->lString.end(); ++_iter12) + { + xfer += oprot->writeString((*_iter12)); + } + xfer += oprot->writeListEnd(); } - xfer += oprot->writeListEnd(); + xfer += oprot->writeFieldEnd(); } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("unionMStringString", ::apache::thrift::protocol::T_MAP, 7); - { - xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->unionMStringString.size())); - std::map ::const_iterator _iter13; - for (_iter13 = this->unionMStringString.begin(); _iter13 != this->unionMStringString.end(); ++_iter13) + if (this->__isset.unionMStringString) { + ++fcnt; + xfer += oprot->writeFieldBegin("unionMStringString", 
::apache::thrift::protocol::T_MAP, 7); { - xfer += oprot->writeString(_iter13->first); - xfer += oprot->writeString(_iter13->second); + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->unionMStringString.size())); + std::map ::const_iterator _iter13; + for (_iter13 = this->unionMStringString.begin(); _iter13 != this->unionMStringString.end(); ++_iter13) + { + xfer += oprot->writeString(_iter13->first); + xfer += oprot->writeString(_iter13->second); + } + xfer += oprot->writeMapEnd(); } - xfer += oprot->writeMapEnd(); + xfer += oprot->writeFieldEnd(); + } + if (fcnt != 1) { + throw ::apache::thrift::TException("Union must have one set value."); } - xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -260,16 +273,20 @@ uint32_t IntString::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t IntString::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("IntString"); + ++fcnt; xfer += oprot->writeFieldBegin("myint", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->myint); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("myString", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->myString); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("underscore_int", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32(this->underscore_int); xfer += oprot->writeFieldEnd(); @@ -500,16 +517,20 @@ uint32_t Complex::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t Complex::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("Complex"); + ++fcnt; xfer += oprot->writeFieldBegin("aint", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->aint); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("aString", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->aString); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("lint", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->lint.size())); @@ -522,6 +543,7 @@ uint32_t Complex::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("lString", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->lString.size())); @@ -534,6 +556,7 @@ uint32_t Complex::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("lintString", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->lintString.size())); @@ -546,6 +569,7 @@ uint32_t Complex::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("mStringString", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->mStringString.size())); @@ -559,6 +583,7 @@ uint32_t Complex::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += 
oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("attributes", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_MAP, static_cast(this->attributes.size())); @@ -590,14 +615,17 @@ uint32_t Complex::write(::apache::thrift::protocol::TProtocol* oprot) const { } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("unionField1", ::apache::thrift::protocol::T_STRUCT, 8); xfer += this->unionField1.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("unionField2", ::apache::thrift::protocol::T_STRUCT, 9); xfer += this->unionField2.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("unionField3", ::apache::thrift::protocol::T_STRUCT, 10); xfer += this->unionField3.write(oprot); xfer += oprot->writeFieldEnd(); @@ -688,8 +716,10 @@ uint32_t SetIntString::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t SetIntString::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("SetIntString"); + ++fcnt; xfer += oprot->writeFieldBegin("sIntString", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->sIntString.size())); @@ -702,6 +732,7 @@ uint32_t SetIntString::write(::apache::thrift::protocol::TProtocol* oprot) const } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("aString", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->aString); xfer += oprot->writeFieldEnd(); diff --git serde/src/gen/thrift/gen-cpp/complex_types.h serde/src/gen/thrift/gen-cpp/complex_types.h index 3f4c760..042e0af 100644 --- serde/src/gen/thrift/gen-cpp/complex_types.h +++ serde/src/gen/thrift/gen-cpp/complex_types.h @@ -50,35 +50,44 @@ class PropValueUnion { void __set_intValue(const int32_t val) { intValue = val; + __isset = _PropValueUnion__isset(); __isset.intValue = true; } void __set_longValue(const int64_t val) { longValue = val; + __isset = _PropValueUnion__isset(); __isset.longValue = true; } void __set_stringValue(const std::string& val) { stringValue = val; + __isset = _PropValueUnion__isset(); __isset.stringValue = true; } void __set_doubleValue(const double val) { doubleValue = val; + __isset = _PropValueUnion__isset(); __isset.doubleValue = true; } void __set_flag(const bool val) { flag = val; + __isset = _PropValueUnion__isset(); __isset.flag = true; } void __set_lString(const std::vector & val) { lString = val; + __isset = _PropValueUnion__isset(); + __isset.lString = true; } void __set_unionMStringString(const std::map & val) { unionMStringString = val; + __isset = _PropValueUnion__isset(); + __isset.unionMStringString = true; } bool operator == (const PropValueUnion & rhs) const @@ -103,9 +112,13 @@ class PropValueUnion { return false; else if (__isset.flag && !(flag == rhs.flag)) return false; - if (!(lString == rhs.lString)) + if (__isset.lString != rhs.__isset.lString) + return false; + else if (__isset.lString && !(lString == rhs.lString)) + return false; + if (__isset.unionMStringString != rhs.__isset.unionMStringString) return false; - if (!(unionMStringString == rhs.unionMStringString)) + else if (__isset.unionMStringString && !(unionMStringString == rhs.unionMStringString)) return false; return true; } @@ -148,14 +161,17 @@ class IntString { void __set_myint(const int32_t val) { myint = val; + 
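// Note on the regenerated setters here and below: each __set_<field>()
// now records that its field was explicitly assigned (__isset.<field> =
// true), and the PropValueUnion setters above additionally reset the whole
// __isset bitmap first, so at most one member of the union ever stays
// marked. PropValueUnion::write() counts the marked members into fcnt and
// throws "Union must have one set value." when fcnt != 1, turning a
// silently malformed union into a serialization-time error.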
@@ -148,14 +161,17 @@ class IntString { void __set_myint(const int32_t val) { myint = val; + __isset.myint = true; } void __set_myString(const std::string& val) { myString = val; + __isset.myString = true; } void __set_underscore_int(const int32_t val) { underscore_int = val; + __isset.underscore_int = true; } bool operator == (const IntString & rhs) const @@ -221,42 +237,52 @@ class Complex { void __set_aint(const int32_t val) { aint = val; + __isset.aint = true; } void __set_aString(const std::string& val) { aString = val; + __isset.aString = true; } void __set_lint(const std::vector<int32_t> & val) { lint = val; + __isset.lint = true; } void __set_lString(const std::vector<std::string> & val) { lString = val; + __isset.lString = true; } void __set_lintString(const std::vector<IntString> & val) { lintString = val; + __isset.lintString = true; } void __set_mStringString(const std::map<std::string, std::string> & val) { mStringString = val; + __isset.mStringString = true; } void __set_attributes(const std::map<std::string, std::map<std::string, std::map<std::string, PropValueUnion> > > & val) { attributes = val; + __isset.attributes = true; } void __set_unionField1(const PropValueUnion& val) { unionField1 = val; + __isset.unionField1 = true; } void __set_unionField2(const PropValueUnion& val) { unionField2 = val; + __isset.unionField2 = true; } void __set_unionField3(const PropValueUnion& val) { unionField3 = val; + __isset.unionField3 = true; } bool operator == (const Complex & rhs) const @@ -320,10 +346,12 @@ class SetIntString { void __set_sIntString(const std::set<IntString> & val) { sIntString = val; + __isset.sIntString = true; } void __set_aString(const std::string& val) { aString = val; + __isset.aString = true; } bool operator == (const SetIntString & rhs) const diff --git serde/src/gen/thrift/gen-cpp/megastruct_types.cpp serde/src/gen/thrift/gen-cpp/megastruct_types.cpp index 2d46b7f..5bf2987 100644 --- serde/src/gen/thrift/gen-cpp/megastruct_types.cpp +++ serde/src/gen/thrift/gen-cpp/megastruct_types.cpp @@ -75,14 +75,17 @@ uint32_t MiniStruct::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t MiniStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("MiniStruct"); if (this->__isset.my_string) { + ++fcnt; xfer += oprot->writeFieldBegin("my_string", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->my_string); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enum) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enum", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32((int32_t)this->my_enum); xfer += oprot->writeFieldEnd(); @@ -499,49 +502,59 @@ uint32_t MegaStruct::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("MegaStruct"); if (this->__isset.my_bool) { + ++fcnt; xfer += oprot->writeFieldBegin("my_bool", ::apache::thrift::protocol::T_BOOL, 1); xfer += oprot->writeBool(this->my_bool); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_byte) { + ++fcnt; xfer += oprot->writeFieldBegin("my_byte", ::apache::thrift::protocol::T_BYTE, 2); xfer += oprot->writeByte(this->my_byte); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_16bit_int) { + ++fcnt; xfer += oprot->writeFieldBegin("my_16bit_int", ::apache::thrift::protocol::T_I16, 3); xfer += oprot->writeI16(this->my_16bit_int); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_32bit_int) { + ++fcnt; xfer += oprot->writeFieldBegin("my_32bit_int", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32(this->my_32bit_int); xfer +=
oprot->writeFieldEnd(); } if (this->__isset.my_64bit_int) { + ++fcnt; xfer += oprot->writeFieldBegin("my_64bit_int", ::apache::thrift::protocol::T_I64, 5); xfer += oprot->writeI64(this->my_64bit_int); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_double) { + ++fcnt; xfer += oprot->writeFieldBegin("my_double", ::apache::thrift::protocol::T_DOUBLE, 6); xfer += oprot->writeDouble(this->my_double); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_string) { + ++fcnt; xfer += oprot->writeFieldBegin("my_string", ::apache::thrift::protocol::T_STRING, 7); xfer += oprot->writeString(this->my_string); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_binary) { + ++fcnt; xfer += oprot->writeFieldBegin("my_binary", ::apache::thrift::protocol::T_STRING, 8); xfer += oprot->writeBinary(this->my_binary); xfer += oprot->writeFieldEnd(); } if (this->__isset.my_string_string_map) { + ++fcnt; xfer += oprot->writeFieldBegin("my_string_string_map", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->my_string_string_map.size())); @@ -556,6 +569,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_string_enum_map) { + ++fcnt; xfer += oprot->writeFieldBegin("my_string_enum_map", ::apache::thrift::protocol::T_MAP, 10); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->my_string_enum_map.size())); @@ -570,6 +584,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enum_string_map) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enum_string_map", ::apache::thrift::protocol::T_MAP, 11); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I32, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->my_enum_string_map.size())); @@ -584,6 +599,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enum_struct_map) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enum_struct_map", ::apache::thrift::protocol::T_MAP, 12); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I32, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->my_enum_struct_map.size())); @@ -598,6 +614,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enum_stringlist_map) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enum_stringlist_map", ::apache::thrift::protocol::T_MAP, 13); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I32, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->my_enum_stringlist_map.size())); @@ -620,6 +637,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enum_structlist_map) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enum_structlist_map", ::apache::thrift::protocol::T_MAP, 14); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I32, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->my_enum_structlist_map.size())); @@ -642,6 +660,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_stringlist) { + ++fcnt; xfer += oprot->writeFieldBegin("my_stringlist",
::apache::thrift::protocol::T_LIST, 15); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->my_stringlist.size())); @@ -655,6 +674,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_structlist) { + ++fcnt; xfer += oprot->writeFieldBegin("my_structlist", ::apache::thrift::protocol::T_LIST, 16); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->my_structlist.size())); @@ -668,6 +688,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enumlist) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enumlist", ::apache::thrift::protocol::T_LIST, 17); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->my_enumlist.size())); @@ -681,6 +702,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_stringset) { + ++fcnt; xfer += oprot->writeFieldBegin("my_stringset", ::apache::thrift::protocol::T_SET, 18); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->my_stringset.size())); @@ -694,6 +716,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_enumset) { + ++fcnt; xfer += oprot->writeFieldBegin("my_enumset", ::apache::thrift::protocol::T_SET, 19); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->my_enumset.size())); @@ -707,6 +730,7 @@ uint32_t MegaStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldEnd(); } if (this->__isset.my_structset) { + ++fcnt; xfer += oprot->writeFieldBegin("my_structset", ::apache::thrift::protocol::T_SET, 20); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->my_structset.size())); diff --git serde/src/gen/thrift/gen-cpp/testthrift_types.cpp serde/src/gen/thrift/gen-cpp/testthrift_types.cpp index 7949f23..8be9cca 100644 --- serde/src/gen/thrift/gen-cpp/testthrift_types.cpp +++ serde/src/gen/thrift/gen-cpp/testthrift_types.cpp @@ -55,8 +55,10 @@ uint32_t InnerStruct::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t InnerStruct::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("InnerStruct"); + ++fcnt; xfer += oprot->writeFieldBegin("field0", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->field0); xfer += oprot->writeFieldEnd(); @@ -145,16 +147,20 @@ uint32_t ThriftTestObj::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t ThriftTestObj::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftTestObj"); + ++fcnt; xfer += oprot->writeFieldBegin("field1", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->field1); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("field2", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->field2); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("field3", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->field3.size()));
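For plain structs such as MiniStruct and MegaStruct above, the same rewrite behaves differently: optional fields are emitted only when their __isset flag is set, and fcnt is pure bookkeeping, with no fcnt != 1 check, because a plain struct may legitimately have any number of its optional fields present. A skeleton of that conditional-write pattern (OptionalPair is hypothetical, protocol calls reduced to comments):

    #include <cstdint>
    #include <string>

    struct OptionalPair {
      struct Isset { bool my_string = false; bool my_i32 = false; } __isset;
      std::string my_string;
      int32_t my_i32 = 0;

      uint32_t write(/* TProtocol* oprot */) const {
        uint32_t xfer = 0;
        uint32_t fcnt = 0;   // counted but never checked for a plain struct
        // writeStructBegin("OptionalPair")
        if (__isset.my_string) {
          ++fcnt;
          // writeFieldBegin("my_string", T_STRING, 1); writeString; writeFieldEnd
        }
        if (__isset.my_i32) {
          ++fcnt;
          // writeFieldBegin("my_i32", T_I32, 2); writeI32; writeFieldEnd
        }
        // writeFieldStop; writeStructEnd -- a reader simply skips absent fields
        return xfer;
      }
    };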
diff --git serde/src/gen/thrift/gen-cpp/testthrift_types.h serde/src/gen/thrift/gen-cpp/testthrift_types.h index 6c84b9f..5830edb 100644 --- serde/src/gen/thrift/gen-cpp/testthrift_types.h +++ serde/src/gen/thrift/gen-cpp/testthrift_types.h @@ -38,6 +38,7 @@ class InnerStruct { void __set_field0(const int32_t val) { field0 = val; + __isset.field0 = true; } bool operator == (const InnerStruct & rhs) const @@ -85,14 +86,17 @@ class ThriftTestObj { void __set_field1(const int32_t val) { field1 = val; + __isset.field1 = true; } void __set_field2(const std::string& val) { field2 = val; + __isset.field2 = true; } void __set_field3(const std::vector<InnerStruct> & val) { field3 = val; + __isset.field3 = true; } bool operator == (const ThriftTestObj & rhs) const diff --git serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java index dda3c5f..1b708dd 100644 --- serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java +++ serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java @@ -528,7 +528,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ThriftTestObj struc struct.field3 = new ArrayList<InnerStruct>(_list0.size); for (int _i1 = 0; _i1 < _list0.size; ++_i1) { - InnerStruct _elem2; // optional + InnerStruct _elem2; // required _elem2 = new InnerStruct(); _elem2.read(iprot); struct.field3.add(_elem2); @@ -636,7 +636,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ThriftTestObj struct struct.field3 = new ArrayList<InnerStruct>(_list5.size); for (int _i6 = 0; _i6 < _list5.size; ++_i6) { - InnerStruct _elem7; // optional + InnerStruct _elem7; // required _elem7 = new InnerStruct(); _elem7.read(iprot); struct.field3.add(_elem7); diff --git serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java index ff0c1f2..07ea8b9 100644 --- serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java +++ serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java @@ -1211,7 +1211,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Complex struct) thr struct.lint = new ArrayList<Integer>(_list18.size); for (int _i19 = 0; _i19 < _list18.size; ++_i19) { - int _elem20; // optional + int _elem20; // required _elem20 = iprot.readI32(); struct.lint.add(_elem20); } @@ -1229,7 +1229,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Complex struct) thr struct.lString = new ArrayList<String>(_list21.size); for (int _i22 = 0; _i22 < _list21.size; ++_i22) { - String _elem23; // optional + String _elem23; // required _elem23 = iprot.readString(); struct.lString.add(_elem23); } @@ -1247,7 +1247,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Complex struct) thr struct.lintString = new ArrayList<IntString>(_list24.size); for (int _i25 = 0; _i25 < _list24.size; ++_i25) { - IntString _elem26; // optional + IntString _elem26; // required _elem26 = new IntString(); _elem26.read(iprot); struct.lintString.add(_elem26); @@ -1610,7 +1610,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro struct.lint = new ArrayList<Integer>(_list57.size); for (int _i58 = 0; _i58 < _list57.size; ++_i58) { - int _elem59; // optional + int _elem59; // required _elem59 = iprot.readI32(); struct.lint.add(_elem59); } @@ -1623,7 +1623,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro
struct.lString = new ArrayList<String>(_list60.size); for (int _i61 = 0; _i61 < _list60.size; ++_i61) { - String _elem62; // optional + String _elem62; // required _elem62 = iprot.readString(); struct.lString.add(_elem62); } @@ -1636,7 +1636,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro struct.lintString = new ArrayList<IntString>(_list63.size); for (int _i64 = 0; _i64 < _list63.size; ++_i64) { - IntString _elem65; // optional + IntString _elem65; // required _elem65 = new IntString(); _elem65.read(iprot); struct.lintString.add(_elem65); diff --git serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java index fba49e4..386fef9 100644 --- serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java +++ serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java @@ -2280,7 +2280,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) _val19 = new ArrayList<String>(_list20.size); for (int _i21 = 0; _i21 < _list20.size; ++_i21) { - String _elem22; // optional + String _elem22; // required _elem22 = iprot.readString(); _val19.add(_elem22); } @@ -2310,7 +2310,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) _val26 = new ArrayList<MiniStruct>(_list27.size); for (int _i28 = 0; _i28 < _list27.size; ++_i28) { - MiniStruct _elem29; // optional + MiniStruct _elem29; // required _elem29 = new MiniStruct(); _elem29.read(iprot); _val26.add(_elem29); @@ -2333,7 +2333,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) struct.my_stringlist = new ArrayList<String>(_list30.size); for (int _i31 = 0; _i31 < _list30.size; ++_i31) { - String _elem32; // optional + String _elem32; // required _elem32 = iprot.readString(); struct.my_stringlist.add(_elem32); } @@ -2351,7 +2351,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) struct.my_structlist = new ArrayList<MiniStruct>(_list33.size); for (int _i34 = 0; _i34 < _list33.size; ++_i34) { - MiniStruct _elem35; // optional + MiniStruct _elem35; // required _elem35 = new MiniStruct(); _elem35.read(iprot); struct.my_structlist.add(_elem35); @@ -2370,7 +2370,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) struct.my_enumlist = new ArrayList<MyEnum>(_list36.size); for (int _i37 = 0; _i37 < _list36.size; ++_i37) { - MyEnum _elem38; // optional + MyEnum _elem38; // required _elem38 = MyEnum.findByValue(iprot.readI32()); struct.my_enumlist.add(_elem38); } @@ -2388,7 +2388,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) struct.my_stringset = new HashSet<String>(2*_set39.size); for (int _i40 = 0; _i40 < _set39.size; ++_i40) { - String _elem41; // optional + String _elem41; // required _elem41 = iprot.readString(); struct.my_stringset.add(_elem41); } @@ -2406,7 +2406,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) struct.my_enumset = new HashSet<MyEnum>(2*_set42.size); for (int _i43 = 0; _i43 < _set42.size; ++_i43) { - MyEnum _elem44; // optional + MyEnum _elem44; // required _elem44 = MyEnum.findByValue(iprot.readI32()); struct.my_enumset.add(_elem44); } @@ -2424,7 +2424,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, MegaStruct struct) struct.my_structset = new HashSet<MiniStruct>(2*_set45.size); for (int _i46 = 0; _i46 < _set45.size; ++_i46) { - MiniStruct _elem47; // optional +
MiniStruct _elem47; // required _elem47 = new MiniStruct(); _elem47.read(iprot); struct.my_structset.add(_elem47); @@ -3023,7 +3023,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t _val95 = new ArrayList<String>(_list96.size); for (int _i97 = 0; _i97 < _list96.size; ++_i97) { - String _elem98; // optional + String _elem98; // required _elem98 = iprot.readString(); _val95.add(_elem98); } @@ -3047,7 +3047,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t _val102 = new ArrayList<MiniStruct>(_list103.size); for (int _i104 = 0; _i104 < _list103.size; ++_i104) { - MiniStruct _elem105; // optional + MiniStruct _elem105; // required _elem105 = new MiniStruct(); _elem105.read(iprot); _val102.add(_elem105); @@ -3064,7 +3064,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t struct.my_stringlist = new ArrayList<String>(_list106.size); for (int _i107 = 0; _i107 < _list106.size; ++_i107) { - String _elem108; // optional + String _elem108; // required _elem108 = iprot.readString(); struct.my_stringlist.add(_elem108); } @@ -3077,7 +3077,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t struct.my_structlist = new ArrayList<MiniStruct>(_list109.size); for (int _i110 = 0; _i110 < _list109.size; ++_i110) { - MiniStruct _elem111; // optional + MiniStruct _elem111; // required _elem111 = new MiniStruct(); _elem111.read(iprot); struct.my_structlist.add(_elem111); @@ -3091,7 +3091,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t struct.my_enumlist = new ArrayList<MyEnum>(_list112.size); for (int _i113 = 0; _i113 < _list112.size; ++_i113) { - MyEnum _elem114; // optional + MyEnum _elem114; // required _elem114 = MyEnum.findByValue(iprot.readI32()); struct.my_enumlist.add(_elem114); } @@ -3104,7 +3104,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t struct.my_stringset = new HashSet<String>(2*_set115.size); for (int _i116 = 0; _i116 < _set115.size; ++_i116) { - String _elem117; // optional + String _elem117; // required _elem117 = iprot.readString(); struct.my_stringset.add(_elem117); } @@ -3117,7 +3117,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t struct.my_enumset = new HashSet<MyEnum>(2*_set118.size); for (int _i119 = 0; _i119 < _set118.size; ++_i119) { - MyEnum _elem120; // optional + MyEnum _elem120; // required _elem120 = MyEnum.findByValue(iprot.readI32()); struct.my_enumset.add(_elem120); } @@ -3130,7 +3130,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, MegaStruct struct) t struct.my_structset = new HashSet<MiniStruct>(2*_set121.size); for (int _i122 = 0; _i122 < _set121.size; ++_i122) { - MiniStruct _elem123; // optional + MiniStruct _elem123; // required _elem123 = new MiniStruct(); _elem123.read(iprot); struct.my_structset.add(_elem123); diff --git serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/PropValueUnion.java serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/PropValueUnion.java index a50a508..aa56dc9 100644 --- serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/PropValueUnion.java +++ serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/PropValueUnion.java @@ -300,7 +300,7 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip lString = new ArrayList<String>(_list0.size); for (int _i1 = 0; _i1 < _list0.size; ++_i1) { - String _elem2; // optional + String _elem2; // required _elem2
= iprot.readString(); lString.add(_elem2); } @@ -423,7 +423,7 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot lString = new ArrayList<String>(_list9.size); for (int _i10 = 0; _i10 < _list9.size; ++_i10) { - String _elem11; // optional + String _elem11; // required _elem11 = iprot.readString(); lString.add(_elem11); } diff --git serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java index 334d225..676f2b2 100644 --- serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java +++ serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java @@ -431,7 +431,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetIntString struct struct.sIntString = new HashSet<IntString>(2*_set82.size); for (int _i83 = 0; _i83 < _set82.size; ++_i83) { - IntString _elem84; // optional + IntString _elem84; // required _elem84 = new IntString(); _elem84.read(iprot); struct.sIntString.add(_elem84); @@ -530,7 +530,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetIntString struct) struct.sIntString = new HashSet<IntString>(2*_set87.size); for (int _i88 = 0; _i88 < _set87.size; ++_i88) { - IntString _elem89; // optional + IntString _elem89; // required _elem89 = new IntString(); _elem89.read(iprot); struct.sIntString.add(_elem89); diff --git service/src/gen/thrift/gen-cpp/TCLIService.cpp service/src/gen/thrift/gen-cpp/TCLIService.cpp index 209ce63..163bad7 100644 --- service/src/gen/thrift/gen-cpp/TCLIService.cpp +++ service/src/gen/thrift/gen-cpp/TCLIService.cpp @@ -50,8 +50,10 @@ uint32_t TCLIService_OpenSession_args::read(::apache::thrift::protocol::TProtoco uint32_t TCLIService_OpenSession_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_OpenSession_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -63,8 +65,10 @@ uint32_t TCLIService_OpenSession_pargs::write(::apache::thrift::protocol::TProtoc uint32_t TCLIService_OpenSession_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_OpenSession_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -212,8 +216,10 @@ uint32_t TCLIService_CloseSession_args::read(::apache::thrift::protocol::TProtoc uint32_t TCLIService_CloseSession_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CloseSession_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -225,8 +231,10 @@ uint32_t TCLIService_CloseSession_args::write(::apache::thrift::protocol::TProto uint32_t TCLIService_CloseSession_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CloseSession_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@
-374,8 +382,10 @@ uint32_t TCLIService_GetInfo_args::read(::apache::thrift::protocol::TProtocol* i uint32_t TCLIService_GetInfo_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetInfo_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -387,8 +397,10 @@ uint32_t TCLIService_GetInfo_args::write(::apache::thrift::protocol::TProtocol* uint32_t TCLIService_GetInfo_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetInfo_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -536,8 +548,10 @@ uint32_t TCLIService_ExecuteStatement_args::read(::apache::thrift::protocol::TPr uint32_t TCLIService_ExecuteStatement_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_ExecuteStatement_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -549,8 +563,10 @@ uint32_t TCLIService_ExecuteStatement_args::write(::apache::thrift::protocol::TP uint32_t TCLIService_ExecuteStatement_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_ExecuteStatement_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -698,8 +714,10 @@ uint32_t TCLIService_GetTypeInfo_args::read(::apache::thrift::protocol::TProtoco uint32_t TCLIService_GetTypeInfo_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetTypeInfo_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -711,8 +729,10 @@ uint32_t TCLIService_GetTypeInfo_args::write(::apache::thrift::protocol::TProtoc uint32_t TCLIService_GetTypeInfo_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetTypeInfo_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -860,8 +880,10 @@ uint32_t TCLIService_GetCatalogs_args::read(::apache::thrift::protocol::TProtoco uint32_t TCLIService_GetCatalogs_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetCatalogs_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -873,8 +895,10 @@ uint32_t TCLIService_GetCatalogs_args::write(::apache::thrift::protocol::TProtoc uint32_t TCLIService_GetCatalogs_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetCatalogs_pargs"); + 
++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1022,8 +1046,10 @@ uint32_t TCLIService_GetSchemas_args::read(::apache::thrift::protocol::TProtocol uint32_t TCLIService_GetSchemas_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetSchemas_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -1035,8 +1061,10 @@ uint32_t TCLIService_GetSchemas_args::write(::apache::thrift::protocol::TProtoco uint32_t TCLIService_GetSchemas_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetSchemas_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1184,8 +1212,10 @@ uint32_t TCLIService_GetTables_args::read(::apache::thrift::protocol::TProtocol* uint32_t TCLIService_GetTables_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetTables_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -1197,8 +1227,10 @@ uint32_t TCLIService_GetTables_args::write(::apache::thrift::protocol::TProtocol uint32_t TCLIService_GetTables_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetTables_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1346,8 +1378,10 @@ uint32_t TCLIService_GetTableTypes_args::read(::apache::thrift::protocol::TProto uint32_t TCLIService_GetTableTypes_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetTableTypes_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -1359,8 +1393,10 @@ uint32_t TCLIService_GetTableTypes_args::write(::apache::thrift::protocol::TProt uint32_t TCLIService_GetTableTypes_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetTableTypes_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1508,8 +1544,10 @@ uint32_t TCLIService_GetColumns_args::read(::apache::thrift::protocol::TProtocol uint32_t TCLIService_GetColumns_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetColumns_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -1521,8 +1559,10 @@ uint32_t TCLIService_GetColumns_args::write(::apache::thrift::protocol::TProtoco uint32_t 
TCLIService_GetColumns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetColumns_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1670,8 +1710,10 @@ uint32_t TCLIService_GetFunctions_args::read(::apache::thrift::protocol::TProtoc uint32_t TCLIService_GetFunctions_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetFunctions_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -1683,8 +1725,10 @@ uint32_t TCLIService_GetFunctions_args::write(::apache::thrift::protocol::TProto uint32_t TCLIService_GetFunctions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetFunctions_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1832,8 +1876,10 @@ uint32_t TCLIService_GetOperationStatus_args::read(::apache::thrift::protocol::T uint32_t TCLIService_GetOperationStatus_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetOperationStatus_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -1845,8 +1891,10 @@ uint32_t TCLIService_GetOperationStatus_args::write(::apache::thrift::protocol:: uint32_t TCLIService_GetOperationStatus_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetOperationStatus_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -1994,8 +2042,10 @@ uint32_t TCLIService_CancelOperation_args::read(::apache::thrift::protocol::TPro uint32_t TCLIService_CancelOperation_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CancelOperation_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2007,8 +2057,10 @@ uint32_t TCLIService_CancelOperation_args::write(::apache::thrift::protocol::TPr uint32_t TCLIService_CancelOperation_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CancelOperation_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -2156,8 +2208,10 @@ uint32_t TCLIService_CloseOperation_args::read(::apache::thrift::protocol::TProt uint32_t TCLIService_CloseOperation_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CloseOperation_args"); + ++fcnt; xfer += 
oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2169,8 +2223,10 @@ uint32_t TCLIService_CloseOperation_args::write(::apache::thrift::protocol::TPro uint32_t TCLIService_CloseOperation_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CloseOperation_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -2318,8 +2374,10 @@ uint32_t TCLIService_GetResultSetMetadata_args::read(::apache::thrift::protocol: uint32_t TCLIService_GetResultSetMetadata_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetResultSetMetadata_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2331,8 +2389,10 @@ uint32_t TCLIService_GetResultSetMetadata_args::write(::apache::thrift::protocol uint32_t TCLIService_GetResultSetMetadata_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetResultSetMetadata_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -2480,8 +2540,10 @@ uint32_t TCLIService_FetchResults_args::read(::apache::thrift::protocol::TProtoc uint32_t TCLIService_FetchResults_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_FetchResults_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2493,8 +2555,10 @@ uint32_t TCLIService_FetchResults_args::write(::apache::thrift::protocol::TProto uint32_t TCLIService_FetchResults_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_FetchResults_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -2642,8 +2706,10 @@ uint32_t TCLIService_GetDelegationToken_args::read(::apache::thrift::protocol::T uint32_t TCLIService_GetDelegationToken_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetDelegationToken_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2655,8 +2721,10 @@ uint32_t TCLIService_GetDelegationToken_args::write(::apache::thrift::protocol:: uint32_t TCLIService_GetDelegationToken_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_GetDelegationToken_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -2804,8 +2872,10 @@ uint32_t 
TCLIService_CancelDelegationToken_args::read(::apache::thrift::protocol uint32_t TCLIService_CancelDelegationToken_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CancelDelegationToken_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2817,8 +2887,10 @@ uint32_t TCLIService_CancelDelegationToken_args::write(::apache::thrift::protoco uint32_t TCLIService_CancelDelegationToken_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_CancelDelegationToken_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -2966,8 +3038,10 @@ uint32_t TCLIService_RenewDelegationToken_args::read(::apache::thrift::protocol: uint32_t TCLIService_RenewDelegationToken_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_RenewDelegationToken_args"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->req.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2979,8 +3053,10 @@ uint32_t TCLIService_RenewDelegationToken_args::write(::apache::thrift::protocol uint32_t TCLIService_RenewDelegationToken_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TCLIService_RenewDelegationToken_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->req)).write(oprot); xfer += oprot->writeFieldEnd(); diff --git service/src/gen/thrift/gen-cpp/TCLIService.h service/src/gen/thrift/gen-cpp/TCLIService.h index 030475b..f87987b 100644 --- service/src/gen/thrift/gen-cpp/TCLIService.h +++ service/src/gen/thrift/gen-cpp/TCLIService.h @@ -141,6 +141,7 @@ class TCLIService_OpenSession_args { void __set_req(const TOpenSessionReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_OpenSession_args & rhs) const @@ -192,6 +193,7 @@ class TCLIService_OpenSession_result { void __set_success(const TOpenSessionResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_OpenSession_result & rhs) const @@ -249,6 +251,7 @@ class TCLIService_CloseSession_args { void __set_req(const TCloseSessionReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_CloseSession_args & rhs) const @@ -300,6 +303,7 @@ class TCLIService_CloseSession_result { void __set_success(const TCloseSessionResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_CloseSession_result & rhs) const @@ -357,6 +361,7 @@ class TCLIService_GetInfo_args { void __set_req(const TGetInfoReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetInfo_args & rhs) const @@ -408,6 +413,7 @@ class TCLIService_GetInfo_result { void __set_success(const TGetInfoResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetInfo_result & rhs) const @@ -465,6 +471,7 @@ class TCLIService_ExecuteStatement_args { void __set_req(const TExecuteStatementReq& val) { req = val; + __isset.req = true; } bool operator == 
(const TCLIService_ExecuteStatement_args & rhs) const @@ -516,6 +523,7 @@ class TCLIService_ExecuteStatement_result { void __set_success(const TExecuteStatementResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_ExecuteStatement_result & rhs) const @@ -573,6 +581,7 @@ class TCLIService_GetTypeInfo_args { void __set_req(const TGetTypeInfoReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetTypeInfo_args & rhs) const @@ -624,6 +633,7 @@ class TCLIService_GetTypeInfo_result { void __set_success(const TGetTypeInfoResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetTypeInfo_result & rhs) const @@ -681,6 +691,7 @@ class TCLIService_GetCatalogs_args { void __set_req(const TGetCatalogsReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetCatalogs_args & rhs) const @@ -732,6 +743,7 @@ class TCLIService_GetCatalogs_result { void __set_success(const TGetCatalogsResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetCatalogs_result & rhs) const @@ -789,6 +801,7 @@ class TCLIService_GetSchemas_args { void __set_req(const TGetSchemasReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetSchemas_args & rhs) const @@ -840,6 +853,7 @@ class TCLIService_GetSchemas_result { void __set_success(const TGetSchemasResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetSchemas_result & rhs) const @@ -897,6 +911,7 @@ class TCLIService_GetTables_args { void __set_req(const TGetTablesReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetTables_args & rhs) const @@ -948,6 +963,7 @@ class TCLIService_GetTables_result { void __set_success(const TGetTablesResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetTables_result & rhs) const @@ -1005,6 +1021,7 @@ class TCLIService_GetTableTypes_args { void __set_req(const TGetTableTypesReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetTableTypes_args & rhs) const @@ -1056,6 +1073,7 @@ class TCLIService_GetTableTypes_result { void __set_success(const TGetTableTypesResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetTableTypes_result & rhs) const @@ -1113,6 +1131,7 @@ class TCLIService_GetColumns_args { void __set_req(const TGetColumnsReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetColumns_args & rhs) const @@ -1164,6 +1183,7 @@ class TCLIService_GetColumns_result { void __set_success(const TGetColumnsResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetColumns_result & rhs) const @@ -1221,6 +1241,7 @@ class TCLIService_GetFunctions_args { void __set_req(const TGetFunctionsReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetFunctions_args & rhs) const @@ -1272,6 +1293,7 @@ class TCLIService_GetFunctions_result { void __set_success(const TGetFunctionsResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetFunctions_result & rhs) const @@ -1329,6 +1351,7 @@ class TCLIService_GetOperationStatus_args { void __set_req(const TGetOperationStatusReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetOperationStatus_args & rhs) const @@ -1380,6 +1403,7 @@ class TCLIService_GetOperationStatus_result { void 
__set_success(const TGetOperationStatusResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetOperationStatus_result & rhs) const @@ -1437,6 +1461,7 @@ class TCLIService_CancelOperation_args { void __set_req(const TCancelOperationReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_CancelOperation_args & rhs) const @@ -1488,6 +1513,7 @@ class TCLIService_CancelOperation_result { void __set_success(const TCancelOperationResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_CancelOperation_result & rhs) const @@ -1545,6 +1571,7 @@ class TCLIService_CloseOperation_args { void __set_req(const TCloseOperationReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_CloseOperation_args & rhs) const @@ -1596,6 +1623,7 @@ class TCLIService_CloseOperation_result { void __set_success(const TCloseOperationResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_CloseOperation_result & rhs) const @@ -1653,6 +1681,7 @@ class TCLIService_GetResultSetMetadata_args { void __set_req(const TGetResultSetMetadataReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetResultSetMetadata_args & rhs) const @@ -1704,6 +1733,7 @@ class TCLIService_GetResultSetMetadata_result { void __set_success(const TGetResultSetMetadataResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetResultSetMetadata_result & rhs) const @@ -1761,6 +1791,7 @@ class TCLIService_FetchResults_args { void __set_req(const TFetchResultsReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_FetchResults_args & rhs) const @@ -1812,6 +1843,7 @@ class TCLIService_FetchResults_result { void __set_success(const TFetchResultsResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_FetchResults_result & rhs) const @@ -1869,6 +1901,7 @@ class TCLIService_GetDelegationToken_args { void __set_req(const TGetDelegationTokenReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_GetDelegationToken_args & rhs) const @@ -1920,6 +1953,7 @@ class TCLIService_GetDelegationToken_result { void __set_success(const TGetDelegationTokenResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_GetDelegationToken_result & rhs) const @@ -1977,6 +2011,7 @@ class TCLIService_CancelDelegationToken_args { void __set_req(const TCancelDelegationTokenReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_CancelDelegationToken_args & rhs) const @@ -2028,6 +2063,7 @@ class TCLIService_CancelDelegationToken_result { void __set_success(const TCancelDelegationTokenResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_CancelDelegationToken_result & rhs) const @@ -2085,6 +2121,7 @@ class TCLIService_RenewDelegationToken_args { void __set_req(const TRenewDelegationTokenReq& val) { req = val; + __isset.req = true; } bool operator == (const TCLIService_RenewDelegationToken_args & rhs) const @@ -2136,6 +2173,7 @@ class TCLIService_RenewDelegationToken_result { void __set_success(const TRenewDelegationTokenResp& val) { success = val; + __isset.success = true; } bool operator == (const TCLIService_RenewDelegationToken_result & rhs) const diff --git service/src/gen/thrift/gen-cpp/TCLIService_types.cpp service/src/gen/thrift/gen-cpp/TCLIService_types.cpp index 
86eeea3..504dbb9 100644 --- service/src/gen/thrift/gen-cpp/TCLIService_types.cpp +++ service/src/gen/thrift/gen-cpp/TCLIService_types.cpp @@ -315,18 +315,24 @@ uint32_t TTypeQualifierValue::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t TTypeQualifierValue::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TTypeQualifierValue"); if (this->__isset.i32Value) { + ++fcnt; xfer += oprot->writeFieldBegin("i32Value", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->i32Value); xfer += oprot->writeFieldEnd(); } if (this->__isset.stringValue) { + ++fcnt; xfer += oprot->writeFieldBegin("stringValue", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->stringValue); xfer += oprot->writeFieldEnd(); } + if (fcnt != 1) { + throw ::apache::thrift::TException("Union must have one set value."); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -402,8 +408,10 @@ uint32_t TTypeQualifiers::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TTypeQualifiers::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TTypeQualifiers"); + ++fcnt; xfer += oprot->writeFieldBegin("qualifiers", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->qualifiers.size())); @@ -485,13 +493,16 @@ uint32_t TPrimitiveTypeEntry::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t TPrimitiveTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TPrimitiveTypeEntry"); + ++fcnt; xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32((int32_t)this->type); xfer += oprot->writeFieldEnd(); if (this->__isset.typeQualifiers) { + ++fcnt; xfer += oprot->writeFieldBegin("typeQualifiers", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->typeQualifiers.write(oprot); xfer += oprot->writeFieldEnd(); @@ -556,8 +567,10 @@ uint32_t TArrayTypeEntry::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TArrayTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TArrayTypeEntry"); + ++fcnt; xfer += oprot->writeFieldBegin("objectTypePtr", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->objectTypePtr); xfer += oprot->writeFieldEnd(); @@ -631,12 +644,15 @@ uint32_t TMapTypeEntry::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TMapTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TMapTypeEntry"); + ++fcnt; xfer += oprot->writeFieldBegin("keyTypePtr", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->keyTypePtr); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("valueTypePtr", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32(this->valueTypePtr); xfer += oprot->writeFieldEnd(); @@ -715,8 +731,10 @@ uint32_t TStructTypeEntry::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TStructTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TStructTypeEntry"); + ++fcnt;
xfer += oprot->writeFieldBegin("nameToTypePtr", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I32, static_cast(this->nameToTypePtr.size())); @@ -803,8 +821,10 @@ uint32_t TUnionTypeEntry::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TUnionTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TUnionTypeEntry"); + ++fcnt; xfer += oprot->writeFieldBegin("nameToTypePtr", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I32, static_cast(this->nameToTypePtr.size())); @@ -876,8 +896,10 @@ uint32_t TUserDefinedTypeEntry::read(::apache::thrift::protocol::TProtocol* ipro uint32_t TUserDefinedTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TUserDefinedTypeEntry"); + ++fcnt; xfer += oprot->writeFieldBegin("typeClassName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->typeClassName); xfer += oprot->writeFieldEnd(); @@ -977,32 +999,48 @@ uint32_t TTypeEntry::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TTypeEntry::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TTypeEntry"); - xfer += oprot->writeFieldBegin("primitiveEntry", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->primitiveEntry.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("arrayEntry", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->arrayEntry.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("mapEntry", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->mapEntry.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("structEntry", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->structEntry.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("unionEntry", ::apache::thrift::protocol::T_STRUCT, 5); - xfer += this->unionEntry.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("userDefinedTypeEntry", ::apache::thrift::protocol::T_STRUCT, 6); - xfer += this->userDefinedTypeEntry.write(oprot); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.primitiveEntry) { + ++fcnt; + xfer += oprot->writeFieldBegin("primitiveEntry", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->primitiveEntry.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.arrayEntry) { + ++fcnt; + xfer += oprot->writeFieldBegin("arrayEntry", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->arrayEntry.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.mapEntry) { + ++fcnt; + xfer += oprot->writeFieldBegin("mapEntry", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->mapEntry.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.structEntry) { + ++fcnt; + xfer += oprot->writeFieldBegin("structEntry", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->structEntry.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.unionEntry) { + ++fcnt; + xfer += oprot->writeFieldBegin("unionEntry", ::apache::thrift::protocol::T_STRUCT, 5); + xfer += this->unionEntry.write(oprot); + xfer += 
@@ -1079,8 +1117,10 @@ uint32_t TTypeDesc::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TTypeDesc::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TTypeDesc"); + ++fcnt; xfer += oprot->writeFieldBegin("types", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->types.size())); @@ -1181,21 +1221,26 @@ uint32_t TColumnDesc::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TColumnDesc::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TColumnDesc"); + ++fcnt; xfer += oprot->writeFieldBegin("columnName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->columnName); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("typeDesc", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->typeDesc.write(oprot); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("position", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32(this->position); xfer += oprot->writeFieldEnd(); if (this->__isset.comment) { + ++fcnt; xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->comment); xfer += oprot->writeFieldEnd(); @@ -1274,8 +1319,10 @@ uint32_t TTableSchema::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TTableSchema::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TTableSchema"); + ++fcnt; xfer += oprot->writeFieldBegin("columns", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->columns.size())); @@ -1343,9 +1390,11 @@ uint32_t TBoolValue::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TBoolValue::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TBoolValue"); if (this->__isset.value) { + ++fcnt; xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_BOOL, 1); xfer += oprot->writeBool(this->value); xfer += oprot->writeFieldEnd(); @@ -1406,9 +1455,11 @@ uint32_t TByteValue::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TByteValue::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TByteValue"); if (this->__isset.value) { + ++fcnt; xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_BYTE, 1); xfer += oprot->writeByte(this->value); xfer += oprot->writeFieldEnd(); @@ -1469,9 +1520,11 @@ uint32_t TI16Value::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TI16Value::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TI16Value"); if (this->__isset.value) { + ++fcnt; xfer +=
oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_I16, 1); xfer += oprot->writeI16(this->value); xfer += oprot->writeFieldEnd(); @@ -1532,9 +1585,11 @@ uint32_t TI32Value::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TI32Value::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TI32Value"); if (this->__isset.value) { + ++fcnt; xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->value); xfer += oprot->writeFieldEnd(); @@ -1595,9 +1650,11 @@ uint32_t TI64Value::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TI64Value::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TI64Value"); if (this->__isset.value) { + ++fcnt; xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_I64, 1); xfer += oprot->writeI64(this->value); xfer += oprot->writeFieldEnd(); @@ -1658,9 +1715,11 @@ uint32_t TDoubleValue::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TDoubleValue::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TDoubleValue"); if (this->__isset.value) { + ++fcnt; xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_DOUBLE, 1); xfer += oprot->writeDouble(this->value); xfer += oprot->writeFieldEnd(); @@ -1721,9 +1780,11 @@ uint32_t TStringValue::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TStringValue::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TStringValue"); if (this->__isset.value) { + ++fcnt; xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->value); xfer += oprot->writeFieldEnd(); @@ -1832,36 +1893,54 @@ uint32_t TColumnValue::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TColumnValue::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TColumnValue"); - xfer += oprot->writeFieldBegin("boolVal", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->boolVal.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("byteVal", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->byteVal.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("i16Val", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->i16Val.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("i32Val", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->i32Val.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("i64Val", ::apache::thrift::protocol::T_STRUCT, 5); - xfer += this->i64Val.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("doubleVal", ::apache::thrift::protocol::T_STRUCT, 6); - xfer += this->doubleVal.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("stringVal", ::apache::thrift::protocol::T_STRUCT, 7); - xfer += this->stringVal.write(oprot); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.boolVal) { + ++fcnt; + xfer += oprot->writeFieldBegin("boolVal", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->boolVal.write(oprot); + xfer += 
oprot->writeFieldEnd(); + } + if (this->__isset.byteVal) { + ++fcnt; + xfer += oprot->writeFieldBegin("byteVal", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->byteVal.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.i16Val) { + ++fcnt; + xfer += oprot->writeFieldBegin("i16Val", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->i16Val.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.i32Val) { + ++fcnt; + xfer += oprot->writeFieldBegin("i32Val", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->i32Val.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.i64Val) { + ++fcnt; + xfer += oprot->writeFieldBegin("i64Val", ::apache::thrift::protocol::T_STRUCT, 5); + xfer += this->i64Val.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.doubleVal) { + ++fcnt; + xfer += oprot->writeFieldBegin("doubleVal", ::apache::thrift::protocol::T_STRUCT, 6); + xfer += this->doubleVal.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.stringVal) { + ++fcnt; + xfer += oprot->writeFieldBegin("stringVal", ::apache::thrift::protocol::T_STRUCT, 7); + xfer += this->stringVal.write(oprot); + xfer += oprot->writeFieldEnd(); + } + if (fcnt != 1) { + throw ::apache::thrift::TException("Union must have one set value."); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -1939,8 +2018,10 @@ uint32_t TRow::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TRow::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TRow"); + ++fcnt; xfer += oprot->writeFieldBegin("colVals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colVals.size())); @@ -2034,8 +2115,10 @@ uint32_t TBoolColumn::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TBoolColumn::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TBoolColumn"); + ++fcnt; xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_BOOL, static_cast(this->values.size())); @@ -2048,6 +2131,7 @@ uint32_t TBoolColumn::write(::apache::thrift::protocol::TProtocol* oprot) const } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeBinary(this->nulls); xfer += oprot->writeFieldEnd(); @@ -2134,8 +2218,10 @@ uint32_t TByteColumn::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TByteColumn::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TByteColumn"); + ++fcnt; xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_BYTE, static_cast(this->values.size())); @@ -2148,6 +2234,7 @@ uint32_t TByteColumn::write(::apache::thrift::protocol::TProtocol* oprot) const } xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeBinary(this->nulls); xfer += oprot->writeFieldEnd(); @@ -2234,8 +2321,10 @@ uint32_t TI16Column::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t 
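[Annotation, not part of the patch: each T*Column struct above carries a values list plus a binary nulls field written with writeBinary(). By HiveServer2 convention the nulls blob is a bitmask with one bit per row; the exact packing below (LSB-first) is an assumption for illustration, not something this diff specifies:]

    #include <cstddef>
    #include <string>
    #include <vector>

    // Hypothetical helper: pack per-row null flags into the binary payload
    // handed to writeBinary() for the "nulls" field (bit i set => row i NULL).
    std::string packNulls(const std::vector<bool>& isNull) {
      std::string nulls((isNull.size() + 7) / 8, '\0');
      for (std::size_t i = 0; i < isNull.size(); ++i)
        if (isNull[i])
          nulls[i / 8] |= static_cast<char>(1 << (i % 8));
      return nulls;
    }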
@@ -2234,8 +2321,10 @@ uint32_t TI16Column::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TI16Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TI16Column");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I16, static_cast<uint32_t>(this->values.size()));
@@ -2248,6 +2337,7 @@ uint32_t TI16Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->nulls);
   xfer += oprot->writeFieldEnd();
@@ -2334,8 +2424,10 @@ uint32_t TI32Column::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TI32Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TI32Column");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->values.size()));
@@ -2348,6 +2440,7 @@ uint32_t TI32Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->nulls);
   xfer += oprot->writeFieldEnd();
@@ -2434,8 +2527,10 @@ uint32_t TI64Column::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TI64Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TI64Column");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->values.size()));
@@ -2448,6 +2543,7 @@ uint32_t TI64Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
   }
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->nulls);
   xfer += oprot->writeFieldEnd();
@@ -2534,8 +2630,10 @@ uint32_t TDoubleColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TDoubleColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TDoubleColumn");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_DOUBLE, static_cast<uint32_t>(this->values.size()));
@@ -2548,6 +2646,7 @@ uint32_t TDoubleColumn::write(::apache::thrift::protocol::TProtocol* oprot) cons
   }
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->nulls);
   xfer += oprot->writeFieldEnd();
@@ -2634,8 +2733,10 @@ uint32_t TStringColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TStringColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TStringColumn");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
@@ -2648,6 +2749,7 @@ uint32_t TStringColumn::write(::apache::thrift::protocol::TProtocol* oprot) cons
   }
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->nulls);
   xfer += oprot->writeFieldEnd();
@@ -2734,8 +2836,10 @@ uint32_t TBinaryColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TBinaryColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TBinaryColumn");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
@@ -2748,6 +2852,7 @@ uint32_t TBinaryColumn::write(::apache::thrift::protocol::TProtocol* oprot) cons
   }
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("nulls", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->nulls);
   xfer += oprot->writeFieldEnd();
@@ -2864,40 +2969,60 @@ uint32_t TColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TColumn");

-  xfer += oprot->writeFieldBegin("boolVal", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->boolVal.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("byteVal", ::apache::thrift::protocol::T_STRUCT, 2);
-  xfer += this->byteVal.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("i16Val", ::apache::thrift::protocol::T_STRUCT, 3);
-  xfer += this->i16Val.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("i32Val", ::apache::thrift::protocol::T_STRUCT, 4);
-  xfer += this->i32Val.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("i64Val", ::apache::thrift::protocol::T_STRUCT, 5);
-  xfer += this->i64Val.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("doubleVal", ::apache::thrift::protocol::T_STRUCT, 6);
-  xfer += this->doubleVal.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("stringVal", ::apache::thrift::protocol::T_STRUCT, 7);
-  xfer += this->stringVal.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("binaryVal", ::apache::thrift::protocol::T_STRUCT, 8);
-  xfer += this->binaryVal.write(oprot);
-  xfer += oprot->writeFieldEnd();
-
+  if (this->__isset.boolVal) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("boolVal", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->boolVal.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.byteVal) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("byteVal", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->byteVal.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.i16Val) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("i16Val", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->i16Val.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.i32Val) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("i32Val", ::apache::thrift::protocol::T_STRUCT, 4);
+    xfer += this->i32Val.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.i64Val) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("i64Val", ::apache::thrift::protocol::T_STRUCT, 5);
+    xfer += this->i64Val.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.doubleVal) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("doubleVal", ::apache::thrift::protocol::T_STRUCT, 6);
+    xfer += this->doubleVal.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.stringVal) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("stringVal", ::apache::thrift::protocol::T_STRUCT, 7);
+    xfer += this->stringVal.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.binaryVal) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("binaryVal", ::apache::thrift::protocol::T_STRUCT, 8);
+    xfer += this->binaryVal.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (fcnt != 1) {
+    throw ::apache::thrift::TException("Union must have one set value.");
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -3007,12 +3132,15 @@ uint32_t TRowSet::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TRowSet::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TRowSet");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("startRowOffset", ::apache::thrift::protocol::T_I64, 1);
   xfer += oprot->writeI64(this->startRowOffset);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("rows", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->rows.size()));
@@ -3026,6 +3154,7 @@ uint32_t TRowSet::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldEnd();

   if (this->__isset.columns) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("columns", ::apache::thrift::protocol::T_LIST, 3);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->columns.size()));
@@ -3145,13 +3274,16 @@ uint32_t TStatus::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TStatus::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TStatus");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("statusCode", ::apache::thrift::protocol::T_I32, 1);
   xfer += oprot->writeI32((int32_t)this->statusCode);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.infoMessages) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("infoMessages", ::apache::thrift::protocol::T_LIST, 2);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->infoMessages.size()));
@@ -3165,16 +3297,19 @@ uint32_t TStatus::write(::apache::thrift::protocol::TProtocol* oprot) const {
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.sqlState) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("sqlState", ::apache::thrift::protocol::T_STRING, 3);
     xfer += oprot->writeString(this->sqlState);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.errorCode) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I32, 4);
     xfer += oprot->writeI32(this->errorCode);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.errorMessage) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("errorMessage", ::apache::thrift::protocol::T_STRING, 5);
     xfer += oprot->writeString(this->errorMessage);
     xfer += oprot->writeFieldEnd();
@@ -3253,12 +3388,15 @@ uint32_t THandleIdentifier::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t THandleIdentifier::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("THandleIdentifier");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("guid", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeBinary(this->guid);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("secret", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeBinary(this->secret);
   xfer += oprot->writeFieldEnd();
@@ -3322,8 +3460,10 @@ uint32_t TSessionHandle::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TSessionHandle::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TSessionHandle");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("sessionId", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->sessionId.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3418,21 +3558,26 @@ uint32_t TOperationHandle::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TOperationHandle::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TOperationHandle");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationId", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->operationId.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationType", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32((int32_t)this->operationType);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("hasResultSet", ::apache::thrift::protocol::T_BOOL, 3);
   xfer += oprot->writeBool(this->hasResultSet);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.modifiedRowCount) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("modifiedRowCount", ::apache::thrift::protocol::T_DOUBLE, 4);
     xfer += oprot->writeDouble(this->modifiedRowCount);
     xfer += oprot->writeFieldEnd();
@@ -3540,23 +3685,28 @@ uint32_t TOpenSessionReq::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TOpenSessionReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TOpenSessionReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("client_protocol", ::apache::thrift::protocol::T_I32, 1);
   xfer += oprot->writeI32((int32_t)this->client_protocol);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.username) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("username", ::apache::thrift::protocol::T_STRING, 2);
     xfer += oprot->writeString(this->username);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.password) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("password", ::apache::thrift::protocol::T_STRING, 3);
     xfer += oprot->writeString(this->password);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.configuration) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("configuration", ::apache::thrift::protocol::T_MAP, 4);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->configuration.size()));
@@ -3676,22 +3826,27 @@ uint32_t TOpenSessionResp::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TOpenSessionResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TOpenSessionResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("serverProtocolVersion", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32((int32_t)this->serverProtocolVersion);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.sessionHandle) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 3);
     xfer += this->sessionHandle.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.configuration) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("configuration", ::apache::thrift::protocol::T_MAP, 4);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->configuration.size()));
@@ -3767,8 +3922,10 @@ uint32_t TCloseSessionReq::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TCloseSessionReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCloseSessionReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->sessionHandle.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3831,8 +3988,10 @@ uint32_t TCloseSessionResp::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TCloseSessionResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCloseSessionResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -3932,32 +4091,48 @@ uint32_t TGetInfoValue::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TGetInfoValue::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetInfoValue");

-  xfer += oprot->writeFieldBegin("stringValue", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->stringValue);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("smallIntValue", ::apache::thrift::protocol::T_I16, 2);
-  xfer += oprot->writeI16(this->smallIntValue);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("integerBitmask", ::apache::thrift::protocol::T_I32, 3);
-  xfer += oprot->writeI32(this->integerBitmask);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("integerFlag", ::apache::thrift::protocol::T_I32, 4);
-  xfer += oprot->writeI32(this->integerFlag);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("binaryValue", ::apache::thrift::protocol::T_I32, 5);
-  xfer += oprot->writeI32(this->binaryValue);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("lenValue", ::apache::thrift::protocol::T_I64, 6);
-  xfer += oprot->writeI64(this->lenValue);
-  xfer += oprot->writeFieldEnd();
-
+  if (this->__isset.stringValue) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("stringValue", ::apache::thrift::protocol::T_STRING, 1);
+    xfer += oprot->writeString(this->stringValue);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.smallIntValue) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("smallIntValue", ::apache::thrift::protocol::T_I16, 2);
+    xfer += oprot->writeI16(this->smallIntValue);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.integerBitmask) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("integerBitmask", ::apache::thrift::protocol::T_I32, 3);
+    xfer += oprot->writeI32(this->integerBitmask);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.integerFlag) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("integerFlag", ::apache::thrift::protocol::T_I32, 4);
+    xfer += oprot->writeI32(this->integerFlag);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.binaryValue) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("binaryValue", ::apache::thrift::protocol::T_I32, 5);
+    xfer += oprot->writeI32(this->binaryValue);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.lenValue) {
+    ++fcnt;
+    xfer += oprot->writeFieldBegin("lenValue", ::apache::thrift::protocol::T_I64, 6);
+    xfer += oprot->writeI64(this->lenValue);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (fcnt != 1) {
+    throw ::apache::thrift::TException("Union must have one set value.");
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
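[Annotation, not part of the patch: the practical effect of the fcnt guard is that a malformed union now fails fast at serialization time — the old TGetInfoValue::write silently emitted all six members regardless of state. A hypothetical caller-side sketch of what is observable after the change:]

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>

    // UnionStub is a stand-in for a generated union like TGetInfoValue.
    struct UnionStub {
      bool hasStringValue = false, hasLenValue = false;
      void write() const {
        uint32_t fcnt = 0;
        if (hasStringValue) ++fcnt;
        if (hasLenValue) ++fcnt;
        if (fcnt != 1)
          throw std::runtime_error("Union must have one set value.");
      }
    };

    int main() {
      UnionStub u;                       // no member set
      try { u.write(); }                 // now throws instead of writing junk
      catch (const std::exception& e) { std::cout << e.what() << '\n'; }
      u.hasStringValue = true;
      u.write();                         // exactly one member: serializes fine
    }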
++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -4396,8 +4585,10 @@ uint32_t TGetTypeInfoReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetTypeInfoReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetTypeInfoReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -4468,13 +4659,16 @@ uint32_t TGetTypeInfoResp::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetTypeInfoResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetTypeInfoResp"); + ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -4539,8 +4733,10 @@ uint32_t TGetCatalogsReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetCatalogsReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetCatalogsReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -4611,13 +4807,16 @@ uint32_t TGetCatalogsResp::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetCatalogsResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetCatalogsResp"); + ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -4698,18 +4897,22 @@ uint32_t TGetSchemasReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetSchemasReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetSchemasReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.catalogName) { + ++fcnt; xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->catalogName); xfer += oprot->writeFieldEnd(); } if (this->__isset.schemaName) { + ++fcnt; xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->schemaName); xfer += oprot->writeFieldEnd(); @@ -4783,13 +4986,16 @@ uint32_t TGetSchemasResp::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetSchemasResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetSchemasResp"); 
+ ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -4898,28 +5104,34 @@ uint32_t TGetTablesReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetTablesReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetTablesReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.catalogName) { + ++fcnt; xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->catalogName); xfer += oprot->writeFieldEnd(); } if (this->__isset.schemaName) { + ++fcnt; xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->schemaName); xfer += oprot->writeFieldEnd(); } if (this->__isset.tableName) { + ++fcnt; xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); } if (this->__isset.tableTypes) { + ++fcnt; xfer += oprot->writeFieldBegin("tableTypes", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tableTypes.size())); @@ -5003,13 +5215,16 @@ uint32_t TGetTablesResp::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetTablesResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetTablesResp"); + ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -5074,8 +5289,10 @@ uint32_t TGetTableTypesReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetTableTypesReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetTableTypesReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -5146,13 +5363,16 @@ uint32_t TGetTableTypesResp::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t TGetTableTypesResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetTableTypesResp"); + ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -5249,28 +5469,34 @@ uint32_t 
TGetColumnsReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetColumnsReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetColumnsReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.catalogName) { + ++fcnt; xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->catalogName); xfer += oprot->writeFieldEnd(); } if (this->__isset.schemaName) { + ++fcnt; xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->schemaName); xfer += oprot->writeFieldEnd(); } if (this->__isset.tableName) { + ++fcnt; xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->tableName); xfer += oprot->writeFieldEnd(); } if (this->__isset.columnName) { + ++fcnt; xfer += oprot->writeFieldBegin("columnName", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->columnName); xfer += oprot->writeFieldEnd(); @@ -5346,13 +5572,16 @@ uint32_t TGetColumnsResp::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetColumnsResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetColumnsResp"); + ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->operationHandle.write(oprot); xfer += oprot->writeFieldEnd(); @@ -5444,22 +5673,27 @@ uint32_t TGetFunctionsReq::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetFunctionsReq::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetFunctionsReq"); + ++fcnt; xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->sessionHandle.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.catalogName) { + ++fcnt; xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->catalogName); xfer += oprot->writeFieldEnd(); } if (this->__isset.schemaName) { + ++fcnt; xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->schemaName); xfer += oprot->writeFieldEnd(); } + ++fcnt; xfer += oprot->writeFieldBegin("functionName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->functionName); xfer += oprot->writeFieldEnd(); @@ -5534,13 +5768,16 @@ uint32_t TGetFunctionsResp::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t TGetFunctionsResp::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("TGetFunctionsResp"); + ++fcnt; xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->status.write(oprot); xfer += oprot->writeFieldEnd(); if (this->__isset.operationHandle) { + ++fcnt; xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); 
     xfer += this->operationHandle.write(oprot);
     xfer += oprot->writeFieldEnd();
@@ -5605,8 +5842,10 @@ uint32_t TGetOperationStatusReq::read(::apache::thrift::protocol::TProtocol* ipr
 uint32_t TGetOperationStatusReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetOperationStatusReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->operationHandle.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -5703,28 +5942,34 @@ uint32_t TGetOperationStatusResp::read(::apache::thrift::protocol::TProtocol* ip
 uint32_t TGetOperationStatusResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetOperationStatusResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.operationState) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("operationState", ::apache::thrift::protocol::T_I32, 2);
     xfer += oprot->writeI32((int32_t)this->operationState);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.sqlState) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("sqlState", ::apache::thrift::protocol::T_STRING, 3);
     xfer += oprot->writeString(this->sqlState);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.errorCode) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I32, 4);
     xfer += oprot->writeI32(this->errorCode);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.errorMessage) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("errorMessage", ::apache::thrift::protocol::T_STRING, 5);
     xfer += oprot->writeString(this->errorMessage);
     xfer += oprot->writeFieldEnd();
@@ -5792,8 +6037,10 @@ uint32_t TCancelOperationReq::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t TCancelOperationReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCancelOperationReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->operationHandle.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -5856,8 +6103,10 @@ uint32_t TCancelOperationResp::read(::apache::thrift::protocol::TProtocol* iprot
 uint32_t TCancelOperationResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCancelOperationResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -5920,8 +6169,10 @@ uint32_t TCloseOperationReq::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t TCloseOperationReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCloseOperationReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->operationHandle.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -5984,8 +6235,10 @@ uint32_t TCloseOperationResp::read(::apache::thrift::protocol::TProtocol* iprot)
 uint32_t TCloseOperationResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCloseOperationResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -6048,8 +6301,10 @@ uint32_t TGetResultSetMetadataReq::read(::apache::thrift::protocol::TProtocol* i
 uint32_t TGetResultSetMetadataReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetResultSetMetadataReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->operationHandle.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -6120,13 +6375,16 @@ uint32_t TGetResultSetMetadataResp::read(::apache::thrift::protocol::TProtocol*
 uint32_t TGetResultSetMetadataResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetResultSetMetadataResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.schema) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("schema", ::apache::thrift::protocol::T_STRUCT, 2);
     xfer += this->schema.write(oprot);
     xfer += oprot->writeFieldEnd();
@@ -6223,21 +6481,26 @@ uint32_t TFetchResultsReq::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TFetchResultsReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TFetchResultsReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->operationHandle.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("orientation", ::apache::thrift::protocol::T_I32, 2);
   xfer += oprot->writeI32((int32_t)this->orientation);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("maxRows", ::apache::thrift::protocol::T_I64, 3);
   xfer += oprot->writeI64(this->maxRows);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.fetchType) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("fetchType", ::apache::thrift::protocol::T_I16, 4);
     xfer += oprot->writeI16(this->fetchType);
     xfer += oprot->writeFieldEnd();
@@ -6320,18 +6583,22 @@ uint32_t TFetchResultsResp::read(::apache::thrift::protocol::TProtocol* iprot) {
 uint32_t TFetchResultsResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TFetchResultsResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.hasMoreRows) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("hasMoreRows", ::apache::thrift::protocol::T_BOOL, 2);
     xfer += oprot->writeBool(this->hasMoreRows);
     xfer += oprot->writeFieldEnd();
   }
   if (this->__isset.results) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("results", ::apache::thrift::protocol::T_STRUCT, 3);
     xfer += this->results.write(oprot);
     xfer += oprot->writeFieldEnd();
@@ -6419,16 +6686,20 @@ uint32_t TGetDelegationTokenReq::read(::apache::thrift::protocol::TProtocol* ipr
 uint32_t TGetDelegationTokenReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetDelegationTokenReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->sessionHandle.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("owner", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->owner);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("renewer", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->renewer);
   xfer += oprot->writeFieldEnd();
@@ -6501,13 +6772,16 @@ uint32_t TGetDelegationTokenResp::read(::apache::thrift::protocol::TProtocol* ip
 uint32_t TGetDelegationTokenResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TGetDelegationTokenResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();

   if (this->__isset.delegationToken) {
+    ++fcnt;
     xfer += oprot->writeFieldBegin("delegationToken", ::apache::thrift::protocol::T_STRING, 2);
     xfer += oprot->writeString(this->delegationToken);
     xfer += oprot->writeFieldEnd();
@@ -6583,12 +6857,15 @@ uint32_t TCancelDelegationTokenReq::read(::apache::thrift::protocol::TProtocol*
 uint32_t TCancelDelegationTokenReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCancelDelegationTokenReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->sessionHandle.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("delegationToken", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->delegationToken);
   xfer += oprot->writeFieldEnd();
@@ -6652,8 +6929,10 @@ uint32_t TCancelDelegationTokenResp::read(::apache::thrift::protocol::TProtocol*
 uint32_t TCancelDelegationTokenResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TCancelDelegationTokenResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -6727,12 +7006,15 @@ uint32_t TRenewDelegationTokenReq::read(::apache::thrift::protocol::TProtocol* i
 uint32_t TRenewDelegationTokenReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TRenewDelegationTokenReq");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->sessionHandle.write(oprot);
   xfer += oprot->writeFieldEnd();

+  ++fcnt;
   xfer += oprot->writeFieldBegin("delegationToken", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->delegationToken);
   xfer += oprot->writeFieldEnd();
@@ -6796,8 +7078,10 @@ uint32_t TRenewDelegationTokenResp::read(::apache::thrift::protocol::TProtocol*
 uint32_t TRenewDelegationTokenResp::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
+  uint32_t fcnt = 0;
   xfer += oprot->writeStructBegin("TRenewDelegationTokenResp");

+  ++fcnt;
   xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->status.write(oprot);
   xfer += oprot->writeFieldEnd();
diff --git service/src/gen/thrift/gen-cpp/TCLIService_types.h service/src/gen/thrift/gen-cpp/TCLIService_types.h
index 7bceabd..519ec89 100644
--- service/src/gen/thrift/gen-cpp/TCLIService_types.h
+++ service/src/gen/thrift/gen-cpp/TCLIService_types.h
@@ -202,11 +202,13 @@ class TTypeQualifierValue {

   void __set_i32Value(const int32_t val) {
     i32Value = val;
+    __isset = _TTypeQualifierValue__isset();
     __isset.i32Value = true;
   }

   void __set_stringValue(const std::string& val) {
     stringValue = val;
+    __isset = _TTypeQualifierValue__isset();
     __isset.stringValue = true;
   }

@@ -549,41 +551,65 @@ class TTypeEntry {

   void __set_primitiveEntry(const TPrimitiveTypeEntry& val) {
     primitiveEntry = val;
+    __isset = _TTypeEntry__isset();
+    __isset.primitiveEntry = true;
   }

   void __set_arrayEntry(const TArrayTypeEntry& val) {
     arrayEntry = val;
+    __isset = _TTypeEntry__isset();
+    __isset.arrayEntry = true;
   }

   void __set_mapEntry(const TMapTypeEntry& val) {
     mapEntry = val;
+    __isset = _TTypeEntry__isset();
+    __isset.mapEntry = true;
   }

   void __set_structEntry(const TStructTypeEntry& val) {
     structEntry = val;
+    __isset = _TTypeEntry__isset();
+    __isset.structEntry = true;
   }

   void __set_unionEntry(const TUnionTypeEntry& val) {
     unionEntry = val;
+    __isset = _TTypeEntry__isset();
+    __isset.unionEntry = true;
   }

   void __set_userDefinedTypeEntry(const TUserDefinedTypeEntry& val) {
     userDefinedTypeEntry = val;
+    __isset = _TTypeEntry__isset();
+    __isset.userDefinedTypeEntry = true;
   }

   bool operator == (const TTypeEntry & rhs) const
   {
-    if (!(primitiveEntry == rhs.primitiveEntry))
+    if (__isset.primitiveEntry != rhs.__isset.primitiveEntry)
       return false;
-    if (!(arrayEntry == rhs.arrayEntry))
+    else if (__isset.primitiveEntry && !(primitiveEntry == rhs.primitiveEntry))
       return false;
-    if (!(mapEntry == rhs.mapEntry))
+    if (__isset.arrayEntry != rhs.__isset.arrayEntry)
       return false;
-    if (!(structEntry == rhs.structEntry))
+    else if (__isset.arrayEntry && !(arrayEntry == rhs.arrayEntry))
       return false;
-    if (!(unionEntry == rhs.unionEntry))
+    if (__isset.mapEntry != rhs.__isset.mapEntry)
       return false;
-    if (!(userDefinedTypeEntry == rhs.userDefinedTypeEntry))
+    else if (__isset.mapEntry && !(mapEntry == rhs.mapEntry))
+      return false;
+    if (__isset.structEntry != rhs.__isset.structEntry)
+      return false;
+    else if (__isset.structEntry && !(structEntry == rhs.structEntry))
+      return false;
+    if (__isset.unionEntry != rhs.__isset.unionEntry)
+      return false;
+    else if (__isset.unionEntry && !(unionEntry == rhs.unionEntry))
+      return false;
+    if (__isset.userDefinedTypeEntry != rhs.__isset.userDefinedTypeEntry)
+      return false;
+    else if (__isset.userDefinedTypeEntry && !(userDefinedTypeEntry == rhs.userDefinedTypeEntry))
       return false;
     return true;
   }
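[Annotation, not part of the patch: the setter change above is what makes the write-side fcnt == 1 invariant achievable. Each __set_* now value-initializes the whole __isset struct before raising its own flag, so assigning one union member implicitly clears any previously set member. A stub sketch of the idiom:]

    #include <cstdint>
    #include <string>

    // Stand-ins for a generated union and its _T*__isset bookkeeping struct.
    struct IssetFlags { bool i32Value = false; bool stringValue = false; };

    struct QualifierStub {
      IssetFlags __isset;
      int32_t i32Value = 0;
      std::string stringValue;

      void __set_i32Value(int32_t v) {
        i32Value = v;
        __isset = IssetFlags();      // drop whichever member was set before...
        __isset.i32Value = true;     // ...then record this one as active
      }
      void __set_stringValue(const std::string& v) {
        stringValue = v;
        __isset = IssetFlags();
        __isset.stringValue = true;
      }
    };

[With the old setters two flags could end up true at once — or, for TTypeEntry, none at all — states the new write() guard would now reject.]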
@@ -1097,47 +1123,75 @@ class TColumnValue {

   void __set_boolVal(const TBoolValue& val) {
     boolVal = val;
+    __isset = _TColumnValue__isset();
+    __isset.boolVal = true;
   }

   void __set_byteVal(const TByteValue& val) {
     byteVal = val;
+    __isset = _TColumnValue__isset();
+    __isset.byteVal = true;
   }

   void __set_i16Val(const TI16Value& val) {
     i16Val = val;
+    __isset = _TColumnValue__isset();
+    __isset.i16Val = true;
   }

   void __set_i32Val(const TI32Value& val) {
     i32Val = val;
+    __isset = _TColumnValue__isset();
+    __isset.i32Val = true;
   }

   void __set_i64Val(const TI64Value& val) {
     i64Val = val;
+    __isset = _TColumnValue__isset();
+    __isset.i64Val = true;
   }

   void __set_doubleVal(const TDoubleValue& val) {
     doubleVal = val;
+    __isset = _TColumnValue__isset();
+    __isset.doubleVal = true;
   }

   void __set_stringVal(const TStringValue& val) {
     stringVal = val;
+    __isset = _TColumnValue__isset();
+    __isset.stringVal = true;
   }

   bool operator == (const TColumnValue & rhs) const
   {
-    if (!(boolVal == rhs.boolVal))
+    if (__isset.boolVal != rhs.__isset.boolVal)
+      return false;
+    else if (__isset.boolVal && !(boolVal == rhs.boolVal))
+      return false;
+    if (__isset.byteVal != rhs.__isset.byteVal)
       return false;
-    if (!(byteVal == rhs.byteVal))
+    else if (__isset.byteVal && !(byteVal == rhs.byteVal))
       return false;
-    if (!(i16Val == rhs.i16Val))
+    if (__isset.i16Val != rhs.__isset.i16Val)
       return false;
-    if (!(i32Val == rhs.i32Val))
+    else if (__isset.i16Val && !(i16Val == rhs.i16Val))
       return false;
-    if (!(i64Val == rhs.i64Val))
+    if (__isset.i32Val != rhs.__isset.i32Val)
       return false;
-    if (!(doubleVal == rhs.doubleVal))
+    else if (__isset.i32Val && !(i32Val == rhs.i32Val))
       return false;
-    if (!(stringVal == rhs.stringVal))
+    if (__isset.i64Val != rhs.__isset.i64Val)
+      return false;
+    else if (__isset.i64Val && !(i64Val == rhs.i64Val))
+      return false;
+    if (__isset.doubleVal != rhs.__isset.doubleVal)
+      return false;
+    else if (__isset.doubleVal && !(doubleVal == rhs.doubleVal))
+      return false;
+    if (__isset.stringVal != rhs.__isset.stringVal)
+      return false;
+    else if (__isset.stringVal && !(stringVal == rhs.stringVal))
       return false;
     return true;
   }
@@ -1579,53 +1633,85 @@ class TColumn {

   void __set_boolVal(const TBoolColumn& val) {
     boolVal = val;
+    __isset = _TColumn__isset();
+    __isset.boolVal = true;
   }

   void __set_byteVal(const TByteColumn& val) {
     byteVal = val;
+    __isset = _TColumn__isset();
+    __isset.byteVal = true;
   }

   void __set_i16Val(const TI16Column& val) {
     i16Val = val;
+    __isset = _TColumn__isset();
+    __isset.i16Val = true;
   }

   void __set_i32Val(const TI32Column& val) {
     i32Val = val;
+    __isset = _TColumn__isset();
+    __isset.i32Val = true;
   }

   void __set_i64Val(const TI64Column& val) {
     i64Val = val;
+    __isset = _TColumn__isset();
+    __isset.i64Val = true;
   }

   void __set_doubleVal(const TDoubleColumn& val) {
     doubleVal = val;
+    __isset = _TColumn__isset();
+    __isset.doubleVal = true;
   }

   void __set_stringVal(const TStringColumn& val) {
     stringVal = val;
+    __isset = _TColumn__isset();
+    __isset.stringVal = true;
   }

   void __set_binaryVal(const TBinaryColumn& val) {
     binaryVal = val;
+    __isset = _TColumn__isset();
+    __isset.binaryVal = true;
   }

   bool operator == (const TColumn & rhs) const
   {
-    if (!(boolVal == rhs.boolVal))
+    if (__isset.boolVal != rhs.__isset.boolVal)
+      return false;
+    else if (__isset.boolVal && !(boolVal == rhs.boolVal))
+      return false;
+    if (__isset.byteVal != rhs.__isset.byteVal)
       return false;
-    if (!(byteVal == rhs.byteVal))
+    else if (__isset.byteVal && !(byteVal == rhs.byteVal))
       return false;
-    if (!(i16Val == rhs.i16Val))
+    if (__isset.i16Val != rhs.__isset.i16Val)
       return false;
-    if (!(i32Val == rhs.i32Val))
+    else if (__isset.i16Val && !(i16Val == rhs.i16Val))
       return false;
-    if (!(i64Val == rhs.i64Val))
+    if (__isset.i32Val != rhs.__isset.i32Val)
       return false;
-    if (!(doubleVal == rhs.doubleVal))
+    else if (__isset.i32Val && !(i32Val == rhs.i32Val))
       return false;
-    if (!(stringVal == rhs.stringVal))
+    if (__isset.i64Val != rhs.__isset.i64Val)
       return false;
-    if (!(binaryVal == rhs.binaryVal))
+    else if (__isset.i64Val && !(i64Val == rhs.i64Val))
+      return false;
+    if (__isset.doubleVal != rhs.__isset.doubleVal)
+      return false;
+    else if (__isset.doubleVal && !(doubleVal == rhs.doubleVal))
+      return false;
+    if (__isset.stringVal != rhs.__isset.stringVal)
+      return false;
+    else if (__isset.stringVal && !(stringVal == rhs.stringVal))
+      return false;
+    if (__isset.binaryVal != rhs.__isset.binaryVal)
+      return false;
+    else if (__isset.binaryVal && !(binaryVal == rhs.binaryVal))
       return false;
     return true;
   }
@@ -2192,41 +2278,65 @@ class TGetInfoValue {

   void __set_stringValue(const std::string& val) {
     stringValue = val;
+    __isset = _TGetInfoValue__isset();
+    __isset.stringValue = true;
   }

   void __set_smallIntValue(const int16_t val) {
     smallIntValue = val;
+    __isset = _TGetInfoValue__isset();
+    __isset.smallIntValue = true;
   }

   void __set_integerBitmask(const int32_t val) {
     integerBitmask = val;
+    __isset = _TGetInfoValue__isset();
+    __isset.integerBitmask = true;
   }

   void __set_integerFlag(const int32_t val) {
     integerFlag = val;
+    __isset = _TGetInfoValue__isset();
+    __isset.integerFlag = true;
   }

   void __set_binaryValue(const int32_t val) {
     binaryValue = val;
+    __isset = _TGetInfoValue__isset();
+    __isset.binaryValue = true;
   }

   void __set_lenValue(const int64_t val) {
     lenValue = val;
+    __isset = _TGetInfoValue__isset();
+    __isset.lenValue = true;
   }

   bool operator == (const TGetInfoValue & rhs) const
   {
-    if (!(stringValue == rhs.stringValue))
+    if (__isset.stringValue != rhs.__isset.stringValue)
+      return false;
+    else if (__isset.stringValue && !(stringValue == rhs.stringValue))
+      return false;
+    if (__isset.smallIntValue != rhs.__isset.smallIntValue)
+      return false;
+    else if (__isset.smallIntValue && !(smallIntValue == rhs.smallIntValue))
+      return false;
+    if (__isset.integerBitmask != rhs.__isset.integerBitmask)
+      return false;
+    else if (__isset.integerBitmask && !(integerBitmask == rhs.integerBitmask))
+      return false;
+    if (__isset.integerFlag != rhs.__isset.integerFlag)
       return false;
-    if (!(smallIntValue == rhs.smallIntValue))
+    else if (__isset.integerFlag && !(integerFlag == rhs.integerFlag))
       return false;
-    if (!(integerBitmask == rhs.integerBitmask))
+    if (__isset.binaryValue != rhs.__isset.binaryValue)
       return false;
-    if (!(integerFlag == rhs.integerFlag))
+    else if (__isset.binaryValue && !(binaryValue == rhs.binaryValue))
       return false;
-    if (!(binaryValue == rhs.binaryValue))
+    if (__isset.lenValue != rhs.__isset.lenValue)
       return false;
-    if (!(lenValue == rhs.lenValue))
+    else if (__isset.lenValue && !(lenValue == rhs.lenValue))
       return false;
     return true;
   }
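[Annotation, not part of the patch: equality gets the matching treatment — compare which member is set first, then compare payloads only for a member both sides actually carry, so stale bytes in an unset member can no longer break ==. A stub sketch mirroring the generated comparison:]

    #include <cstdint>
    #include <string>

    // GetInfoStub mirrors the shape of the generated TGetInfoValue comparison.
    struct GetInfoStub {
      struct Isset { bool stringValue = false; bool lenValue = false; } __isset;
      std::string stringValue;
      int64_t lenValue = 0;

      bool operator==(const GetInfoStub& rhs) const {
        if (__isset.stringValue != rhs.__isset.stringValue)
          return false;
        else if (__isset.stringValue && !(stringValue == rhs.stringValue))
          return false;               // payload compared only when both set
        if (__isset.lenValue != rhs.__isset.lenValue)
          return false;
        else if (__isset.lenValue && !(lenValue == rhs.lenValue))
          return false;
        return true;
      }
    };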
oprot->writeStructBegin("ThriftHive_fetchOne_args"); xfer += oprot->writeFieldStop(); @@ -208,6 +213,7 @@ uint32_t ThriftHive_fetchOne_args::write(::apache::thrift::protocol::TProtocol* uint32_t ThriftHive_fetchOne_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_fetchOne_pargs"); xfer += oprot->writeFieldStop(); @@ -373,8 +379,10 @@ uint32_t ThriftHive_fetchN_args::read(::apache::thrift::protocol::TProtocol* ipr uint32_t ThriftHive_fetchN_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_fetchN_args"); + ++fcnt; xfer += oprot->writeFieldBegin("numRows", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->numRows); xfer += oprot->writeFieldEnd(); @@ -386,8 +394,10 @@ uint32_t ThriftHive_fetchN_args::write(::apache::thrift::protocol::TProtocol* op uint32_t ThriftHive_fetchN_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_fetchN_pargs"); + ++fcnt; xfer += oprot->writeFieldBegin("numRows", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32((*(this->numRows))); xfer += oprot->writeFieldEnd(); @@ -574,6 +584,7 @@ uint32_t ThriftHive_fetchAll_args::read(::apache::thrift::protocol::TProtocol* i uint32_t ThriftHive_fetchAll_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_fetchAll_args"); xfer += oprot->writeFieldStop(); @@ -583,6 +594,7 @@ uint32_t ThriftHive_fetchAll_args::write(::apache::thrift::protocol::TProtocol* uint32_t ThriftHive_fetchAll_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_fetchAll_pargs"); xfer += oprot->writeFieldStop(); @@ -767,6 +779,7 @@ uint32_t ThriftHive_getSchema_args::read(::apache::thrift::protocol::TProtocol* uint32_t ThriftHive_getSchema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getSchema_args"); xfer += oprot->writeFieldStop(); @@ -776,6 +789,7 @@ uint32_t ThriftHive_getSchema_args::write(::apache::thrift::protocol::TProtocol* uint32_t ThriftHive_getSchema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getSchema_pargs"); xfer += oprot->writeFieldStop(); @@ -928,6 +942,7 @@ uint32_t ThriftHive_getThriftSchema_args::read(::apache::thrift::protocol::TProt uint32_t ThriftHive_getThriftSchema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getThriftSchema_args"); xfer += oprot->writeFieldStop(); @@ -937,6 +952,7 @@ uint32_t ThriftHive_getThriftSchema_args::write(::apache::thrift::protocol::TPro uint32_t ThriftHive_getThriftSchema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getThriftSchema_pargs"); xfer += oprot->writeFieldStop(); @@ -1089,6 +1105,7 @@ uint32_t ThriftHive_getClusterStatus_args::read(::apache::thrift::protocol::TPro uint32_t ThriftHive_getClusterStatus_args::write(::apache::thrift::protocol::TProtocol* oprot) 
const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getClusterStatus_args"); xfer += oprot->writeFieldStop(); @@ -1098,6 +1115,7 @@ uint32_t ThriftHive_getClusterStatus_pargs::write(::apache::thrift::protocol::TPr uint32_t ThriftHive_getClusterStatus_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getClusterStatus_pargs"); xfer += oprot->writeFieldStop(); @@ -1250,6 +1268,7 @@ uint32_t ThriftHive_getQueryPlan_args::read(::apache::thrift::protocol::TProtoco uint32_t ThriftHive_getQueryPlan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getQueryPlan_args"); xfer += oprot->writeFieldStop(); @@ -1259,6 +1278,7 @@ uint32_t ThriftHive_getQueryPlan_pargs::write(::apache::thrift::protocol::TProtoc uint32_t ThriftHive_getQueryPlan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_getQueryPlan_pargs"); xfer += oprot->writeFieldStop(); @@ -1411,6 +1431,7 @@ uint32_t ThriftHive_clean_args::read(::apache::thrift::protocol::TProtocol* ipro uint32_t ThriftHive_clean_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_clean_args"); xfer += oprot->writeFieldStop(); @@ -1420,6 +1441,7 @@ uint32_t ThriftHive_clean_pargs::write(::apache::thrift::protocol::TProtocol* opr uint32_t ThriftHive_clean_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("ThriftHive_clean_pargs"); xfer += oprot->writeFieldStop(); diff --git service/src/gen/thrift/gen-cpp/ThriftHive.h service/src/gen/thrift/gen-cpp/ThriftHive.h index b84362b..fca5f48 100644 --- service/src/gen/thrift/gen-cpp/ThriftHive.h +++ service/src/gen/thrift/gen-cpp/ThriftHive.h @@ -102,6 +102,7 @@ class ThriftHive_execute_args { void __set_query(const std::string& val) { query = val; + __isset.query = true; } bool operator == (const ThriftHive_execute_args & rhs) const @@ -153,6 +154,7 @@ class ThriftHive_execute_result { void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_execute_result & rhs) const @@ -249,10 +251,12 @@ class ThriftHive_fetchOne_result { void __set_success(const std::string& val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_fetchOne_result & rhs) const @@ -314,6 +318,7 @@ class ThriftHive_fetchN_args { void __set_numRows(const int32_t val) { numRows = val; + __isset.numRows = true; } bool operator == (const ThriftHive_fetchN_args & rhs) const @@ -367,10 +372,12 @@ class ThriftHive_fetchN_result { void __set_success(const std::vector<std::string> & val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_fetchN_result & rhs) const @@ -471,10 +478,12 @@ class ThriftHive_fetchAll_result { void __set_success(const std::vector<std::string> & val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_fetchAll_result & rhs) const @@ -575,10 +584,12 @@ class
ThriftHive_getSchema_result { void __set_success(const ::Apache::Hadoop::Hive::Schema& val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_getSchema_result & rhs) const @@ -679,10 +690,12 @@ class ThriftHive_getThriftSchema_result { void __set_success(const ::Apache::Hadoop::Hive::Schema& val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_getThriftSchema_result & rhs) const @@ -783,10 +796,12 @@ class ThriftHive_getClusterStatus_result { void __set_success(const HiveClusterStatus& val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_getClusterStatus_result & rhs) const @@ -887,10 +902,12 @@ class ThriftHive_getQueryPlan_result { void __set_success(const ::Apache::Hadoop::Hive::QueryPlan& val) { success = val; + __isset.success = true; } void __set_ex(const HiveServerException& val) { ex = val; + __isset.ex = true; } bool operator == (const ThriftHive_getQueryPlan_result & rhs) const diff --git service/src/gen/thrift/gen-cpp/hive_service_types.cpp service/src/gen/thrift/gen-cpp/hive_service_types.cpp index 255fb00..94d3f3a 100644 --- service/src/gen/thrift/gen-cpp/hive_service_types.cpp +++ service/src/gen/thrift/gen-cpp/hive_service_types.cpp @@ -107,28 +107,35 @@ uint32_t HiveClusterStatus::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t HiveClusterStatus::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("HiveClusterStatus"); + ++fcnt; xfer += oprot->writeFieldBegin("taskTrackers", ::apache::thrift::protocol::T_I32, 1); xfer += oprot->writeI32(this->taskTrackers); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("mapTasks", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32(this->mapTasks); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("reduceTasks", ::apache::thrift::protocol::T_I32, 3); xfer += oprot->writeI32(this->reduceTasks); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("maxMapTasks", ::apache::thrift::protocol::T_I32, 4); xfer += oprot->writeI32(this->maxMapTasks); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("maxReduceTasks", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32(this->maxReduceTasks); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 6); xfer += oprot->writeI32((int32_t)this->state); xfer += oprot->writeFieldEnd(); @@ -210,16 +217,20 @@ uint32_t HiveServerException::read(::apache::thrift::protocol::TProtocol* iprot) uint32_t HiveServerException::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; + uint32_t fcnt = 0; xfer += oprot->writeStructBegin("HiveServerException"); + ++fcnt; xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->message); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I32, 2); xfer += oprot->writeI32(this->errorCode); xfer += oprot->writeFieldEnd(); + ++fcnt; xfer += oprot->writeFieldBegin("SQLState", ::apache::thrift::protocol::T_STRING, 3); xfer += 
oprot->writeString(this->SQLState); xfer += oprot->writeFieldEnd(); diff --git service/src/gen/thrift/gen-cpp/hive_service_types.h service/src/gen/thrift/gen-cpp/hive_service_types.h index bc0e652..79cb0c8 100644 --- service/src/gen/thrift/gen-cpp/hive_service_types.h +++ service/src/gen/thrift/gen-cpp/hive_service_types.h @@ -60,26 +60,32 @@ class HiveClusterStatus { void __set_taskTrackers(const int32_t val) { taskTrackers = val; + __isset.taskTrackers = true; } void __set_mapTasks(const int32_t val) { mapTasks = val; + __isset.mapTasks = true; } void __set_reduceTasks(const int32_t val) { reduceTasks = val; + __isset.reduceTasks = true; } void __set_maxMapTasks(const int32_t val) { maxMapTasks = val; + __isset.maxMapTasks = true; } void __set_maxReduceTasks(const int32_t val) { maxReduceTasks = val; + __isset.maxReduceTasks = true; } void __set_state(const JobTrackerState::type val) { state = val; + __isset.state = true; } bool operator == (const HiveClusterStatus & rhs) const @@ -137,14 +143,17 @@ class HiveServerException : public ::apache::thrift::TException { void __set_message(const std::string& val) { message = val; + __isset.message = true; } void __set_errorCode(const int32_t val) { errorCode = val; + __isset.errorCode = true; } void __set_SQLState(const std::string& val) { SQLState = val; + __isset.SQLState = true; } bool operator == (const HiveServerException & rhs) const diff --git service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java index 1c44789..745e6cc 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java +++ service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java @@ -3023,7 +3023,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fetchN_result struc struct.success = new ArrayList<String>(_list0.size); for (int _i1 = 0; _i1 < _list0.size; ++_i1) { - String _elem2; // optional + String _elem2; // required _elem2 = iprot.readString(); struct.success.add(_elem2); } @@ -3122,7 +3122,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, fetchN_result struct struct.success = new ArrayList<String>(_list5.size); for (int _i6 = 0; _i6 < _list5.size; ++_i6) { - String _elem7; // optional + String _elem7; // required _elem7 = iprot.readString(); struct.success.add(_elem7); } @@ -3785,7 +3785,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fetchAll_result str struct.success = new ArrayList<String>(_list8.size); for (int _i9 = 0; _i9 < _list8.size; ++_i9) { - String _elem10; // optional + String _elem10; // required _elem10 = iprot.readString(); struct.success.add(_elem10); } @@ -3884,7 +3884,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, fetchAll_result stru struct.success = new ArrayList<String>(_list13.size); for (int _i14 = 0; _i14 < _list13.size; ++_i14) { - String _elem15; // optional + String _elem15; // required _elem15 = iprot.readString(); struct.success.add(_elem15); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java index 6b1b054..1ac28a2 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBinaryColumn.java @@ -451,7 +451,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TBinaryColumn
struc struct.values = new ArrayList<ByteBuffer>(_list110.size); for (int _i111 = 0; _i111 < _list110.size; ++_i111) { - ByteBuffer _elem112; // optional + ByteBuffer _elem112; // required _elem112 = iprot.readBinary(); struct.values.add(_elem112); } @@ -535,7 +535,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TBinaryColumn struct struct.values = new ArrayList<ByteBuffer>(_list115.size); for (int _i116 = 0; _i116 < _list115.size; ++_i116) { - ByteBuffer _elem117; // optional + ByteBuffer _elem117; // required _elem117 = iprot.readBinary(); struct.values.add(_elem117); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java index efd571c..b2bda35 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TBoolColumn.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TBoolColumn struct) struct.values = new ArrayList<Boolean>(_list54.size); for (int _i55 = 0; _i55 < _list54.size; ++_i55) { - boolean _elem56; // optional + boolean _elem56; // required _elem56 = iprot.readBool(); struct.values.add(_elem56); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TBoolColumn struct) struct.values = new ArrayList<Boolean>(_list59.size); for (int _i60 = 0; _i60 < _list59.size; ++_i60) { - boolean _elem61; // optional + boolean _elem61; // required _elem61 = iprot.readBool(); struct.values.add(_elem61); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java index 169bfde..defdec0 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TByteColumn.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TByteColumn struct) struct.values = new ArrayList<Byte>(_list62.size); for (int _i63 = 0; _i63 < _list62.size; ++_i63) { - byte _elem64; // optional + byte _elem64; // required _elem64 = iprot.readByte(); struct.values.add(_elem64); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TByteColumn struct) struct.values = new ArrayList<Byte>(_list67.size); for (int _i68 = 0; _i68 < _list67.size; ++_i68) { - byte _elem69; // optional + byte _elem69; // required _elem69 = iprot.readByte(); struct.values.add(_elem69); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java index 4fc5454..88926c8 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TDoubleColumn.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TDoubleColumn struc struct.values = new ArrayList<Double>(_list94.size); for (int _i95 = 0; _i95 < _list94.size; ++_i95) { - double _elem96; // optional + double _elem96; // required _elem96 = iprot.readDouble(); struct.values.add(_elem96); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TDoubleColumn struct struct.values = new ArrayList<Double>(_list99.size); for (int _i100 = 0; _i100 < _list99.size; ++_i100) { -
double _elem101; // optional + double _elem101; // required _elem101 = iprot.readDouble(); struct.values.add(_elem101); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java index c973fcc..d0c3059 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TGetTablesReq.java @@ -715,7 +715,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTablesReq struc struct.tableTypes = new ArrayList<String>(_list172.size); for (int _i173 = 0; _i173 < _list172.size; ++_i173) { - String _elem174; // optional + String _elem174; // required _elem174 = iprot.readString(); struct.tableTypes.add(_elem174); } @@ -856,7 +856,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TGetTablesReq struct struct.tableTypes = new ArrayList<String>(_list177.size); for (int _i178 = 0; _i178 < _list177.size; ++_i178) { - String _elem179; // optional + String _elem179; // required _elem179 = iprot.readString(); struct.tableTypes.add(_elem179); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java index c836630..e997bd9 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI16Column.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TI16Column struct) struct.values = new ArrayList<Short>(_list70.size); for (int _i71 = 0; _i71 < _list70.size; ++_i71) { - short _elem72; // optional + short _elem72; // required _elem72 = iprot.readI16(); struct.values.add(_elem72); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TI16Column struct) t struct.values = new ArrayList<Short>(_list75.size); for (int _i76 = 0; _i76 < _list75.size; ++_i76) { - short _elem77; // optional + short _elem77; // required _elem77 = iprot.readI16(); struct.values.add(_elem77); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java index 6c6c5f3..3cdd2a5 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI32Column.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TI32Column struct) struct.values = new ArrayList<Integer>(_list78.size); for (int _i79 = 0; _i79 < _list78.size; ++_i79) { - int _elem80; // optional + int _elem80; // required _elem80 = iprot.readI32(); struct.values.add(_elem80); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TI32Column struct) t struct.values = new ArrayList<Integer>(_list83.size); for (int _i84 = 0; _i84 < _list83.size; ++_i84) { - int _elem85; // optional + int _elem85; // required _elem85 = iprot.readI32(); struct.values.add(_elem85); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java index cc383ed..c8fc41f 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java +++
service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TI64Column.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TI64Column struct) struct.values = new ArrayList<Long>(_list86.size); for (int _i87 = 0; _i87 < _list86.size; ++_i87) { - long _elem88; // optional + long _elem88; // required _elem88 = iprot.readI64(); struct.values.add(_elem88); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TI64Column struct) t struct.values = new ArrayList<Long>(_list91.size); for (int _i92 = 0; _i92 < _list91.size; ++_i92) { - long _elem93; // optional + long _elem93; // required _elem93 = iprot.readI64(); struct.values.add(_elem93); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java index a44cfb0..8e58aa5 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRow.java @@ -354,7 +354,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRow struct) throws struct.colVals = new ArrayList<TColumnValue>(_list46.size); for (int _i47 = 0; _i47 < _list46.size; ++_i47) { - TColumnValue _elem48; // optional + TColumnValue _elem48; // required _elem48 = new TColumnValue(); _elem48.read(iprot); struct.colVals.add(_elem48); @@ -425,7 +425,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRow struct) throws struct.colVals = new ArrayList<TColumnValue>(_list51.size); for (int _i52 = 0; _i52 < _list51.size; ++_i52) { - TColumnValue _elem53; // optional + TColumnValue _elem53; // required _elem53 = new TColumnValue(); _elem53.read(iprot); struct.colVals.add(_elem53); diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java index d16c8a4..4bb5749 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TRowSet.java @@ -549,7 +549,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowSet struct) thr struct.rows = new ArrayList<TRow>(_list118.size); for (int _i119 = 0; _i119 < _list118.size; ++_i119) { - TRow _elem120; // optional + TRow _elem120; // required _elem120 = new TRow(); _elem120.read(iprot); struct.rows.add(_elem120); @@ -568,7 +568,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TRowSet struct) thr struct.columns = new ArrayList<TColumn>(_list121.size); for (int _i122 = 0; _i122 < _list121.size; ++_i122) { - TColumn _elem123; // optional + TColumn _elem123; // required _elem123 = new TColumn(); _elem123.read(iprot); struct.columns.add(_elem123); @@ -673,7 +673,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRowSet struct) thro struct.rows = new ArrayList<TRow>(_list128.size); for (int _i129 = 0; _i129 < _list128.size; ++_i129) { - TRow _elem130; // optional + TRow _elem130; // required _elem130 = new TRow(); _elem130.read(iprot); struct.rows.add(_elem130); @@ -687,7 +687,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TRowSet struct) thro struct.columns = new ArrayList<TColumn>(_list131.size); for (int _i132 = 0; _i132 < _list131.size; ++_i132) { - TColumn _elem133; // optional + TColumn _elem133; // required _elem133 = new TColumn(); _elem133.read(iprot); struct.columns.add(_elem133); diff --git
service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java index 24a746e..cb5010f 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStatus.java @@ -698,7 +698,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TStatus struct) thr struct.infoMessages = new ArrayList<String>(_list134.size); for (int _i135 = 0; _i135 < _list134.size; ++_i135) { - String _elem136; // optional + String _elem136; // required _elem136 = iprot.readString(); struct.infoMessages.add(_elem136); } @@ -848,7 +848,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TStatus struct) thro struct.infoMessages = new ArrayList<String>(_list139.size); for (int _i140 = 0; _i140 < _list139.size; ++_i140) { - String _elem141; // optional + String _elem141; // required _elem141 = iprot.readString(); struct.infoMessages.add(_elem141); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java index 3dae460..5398056 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TStringColumn.java @@ -449,7 +449,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TStringColumn struc struct.values = new ArrayList<String>(_list102.size); for (int _i103 = 0; _i103 < _list102.size; ++_i103) { - String _elem104; // optional + String _elem104; // required _elem104 = iprot.readString(); struct.values.add(_elem104); } @@ -533,7 +533,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TStringColumn struct struct.values = new ArrayList<String>(_list107.size); for (int _i108 = 0; _i108 < _list107.size; ++_i108) { - String _elem109; // optional + String _elem109; // required _elem109 = iprot.readString(); struct.values.add(_elem109); } diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java index ff5e54d..e2882c2 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java +++ service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTableSchema.java @@ -354,7 +354,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTableSchema struct struct.columns = new ArrayList<TColumnDesc>(_list38.size); for (int _i39 = 0; _i39 < _list38.size; ++_i39) { - TColumnDesc _elem40; // optional + TColumnDesc _elem40; // required _elem40 = new TColumnDesc(); _elem40.read(iprot); struct.columns.add(_elem40); @@ -425,7 +425,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TTableSchema struct) struct.columns = new ArrayList<TColumnDesc>(_list43.size); for (int _i44 = 0; _i44 < _list43.size; ++_i44) { - TColumnDesc _elem45; // optional + TColumnDesc _elem45; // required _elem45 = new TColumnDesc(); _elem45.read(iprot); struct.columns.add(_elem45); diff --git service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java index 251f86a..d6a6d23 100644 --- service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java +++
service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TTypeDesc.java @@ -354,7 +354,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TTypeDesc struct) t struct.types = new ArrayList<TTypeEntry>(_list30.size); for (int _i31 = 0; _i31 < _list30.size; ++_i31) { - TTypeEntry _elem32; // optional + TTypeEntry _elem32; // required _elem32 = new TTypeEntry(); _elem32.read(iprot); struct.types.add(_elem32); @@ -425,7 +425,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TTypeDesc struct) th struct.types = new ArrayList<TTypeEntry>(_list35.size); for (int _i36 = 0; _i36 < _list35.size; ++_i36) { - TTypeEntry _elem37; // optional + TTypeEntry _elem37; // required _elem37 = new TTypeEntry(); _elem37.read(iprot); struct.types.add(_elem37); diff --git service/src/gen/thrift/gen-py/TCLIService/TCLIService-remote service/src/gen/thrift/gen-py/TCLIService/TCLIService-remote old mode 100644 new mode 100755 diff --git service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote old mode 100644 new mode 100755 diff --git service/src/gen/thrift/gen-py/hive_service/ThriftHive.py service/src/gen/thrift/gen-py/hive_service/ThriftHive.py index 33912f9..d5887ec 100644 --- service/src/gen/thrift/gen-py/hive_service/ThriftHive.py +++ service/src/gen/thrift/gen-py/hive_service/ThriftHive.py @@ -342,7 +342,7 @@ def process_execute(self, seqid, iprot, oprot): result = execute_result() try: self._handler.execute(args.query) - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("execute", TMessageType.REPLY, seqid) result.write(oprot) @@ -356,7 +356,7 @@ def process_fetchOne(self, seqid, iprot, oprot): result = fetchOne_result() try: result.success = self._handler.fetchOne() - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("fetchOne", TMessageType.REPLY, seqid) result.write(oprot) @@ -370,7 +370,7 @@ def process_fetchN(self, seqid, iprot, oprot): result = fetchN_result() try: result.success = self._handler.fetchN(args.numRows) - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("fetchN", TMessageType.REPLY, seqid) result.write(oprot) @@ -384,7 +384,7 @@ def process_fetchAll(self, seqid, iprot, oprot): result = fetchAll_result() try: result.success = self._handler.fetchAll() - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("fetchAll", TMessageType.REPLY, seqid) result.write(oprot) @@ -398,7 +398,7 @@ def process_getSchema(self, seqid, iprot, oprot): result = getSchema_result() try: result.success = self._handler.getSchema() - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("getSchema", TMessageType.REPLY, seqid) result.write(oprot) @@ -412,7 +412,7 @@ def process_getThriftSchema(self, seqid, iprot, oprot): result = getThriftSchema_result() try: result.success = self._handler.getThriftSchema() - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("getThriftSchema", TMessageType.REPLY, seqid) result.write(oprot) @@ -426,7 +426,7 @@ def process_getClusterStatus(self, seqid, iprot, oprot): result = getClusterStatus_result() try: result.success = self._handler.getClusterStatus() - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex
oprot.writeMessageBegin("getClusterStatus", TMessageType.REPLY, seqid) result.write(oprot) @@ -440,7 +440,7 @@ def process_getQueryPlan(self, seqid, iprot, oprot): result = getQueryPlan_result() try: result.success = self._handler.getQueryPlan() - except HiveServerException as ex: + except HiveServerException, ex: result.ex = ex oprot.writeMessageBegin("getQueryPlan", TMessageType.REPLY, seqid) result.write(oprot)