diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index 287557fac1..914458be06 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) % 2.0) = 0.0) (type: boolean)
+              predicate: ((UDFToDouble(key) % 2.0D) = 0.0D) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), value (type: string)
diff --git a/contrib/src/test/results/clientpositive/dboutput.q.out b/contrib/src/test/results/clientpositive/dboutput.q.out
index 442a98a73d..7427892a0a 100644
--- a/contrib/src/test/results/clientpositive/dboutput.q.out
+++ b/contrib/src/test/results/clientpositive/dboutput.q.out
@@ -137,7 +137,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: dboutput('jdbc:derby:../build/test_dboutput_db','','','INSERT INTO app_info (kkey,vvalue) VALUES (?,?)',key,value) (type: int)
diff --git a/contrib/src/test/results/clientpositive/udaf_example_avg.q.out b/contrib/src/test/results/clientpositive/udaf_example_avg.q.out
index 4e6cb99226..7fbf22910d 100644
--- a/contrib/src/test/results/clientpositive/udaf_example_avg.q.out
+++ b/contrib/src/test/results/clientpositive/udaf_example_avg.q.out
@@ -26,7 +26,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0), null, substr(value, 5)) (type: string)
+              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0D), null, substr(value, 5)) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
diff --git a/contrib/src/test/results/clientpositive/udaf_example_max.q.out b/contrib/src/test/results/clientpositive/udaf_example_max.q.out
index 1794c1edfd..e11ad8ec17 100644
--- a/contrib/src/test/results/clientpositive/udaf_example_max.q.out
+++ b/contrib/src/test/results/clientpositive/udaf_example_max.q.out
@@ -33,7 +33,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0), null, substr(value, 5)) (type: string)
+              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0D), null, substr(value, 5)) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
diff --git a/contrib/src/test/results/clientpositive/udaf_example_max_n.q.out b/contrib/src/test/results/clientpositive/udaf_example_max_n.q.out
index 47d8f520cf..f17eef8ed0 100644
--- a/contrib/src/test/results/clientpositive/udaf_example_max_n.q.out
+++ b/contrib/src/test/results/clientpositive/udaf_example_max_n.q.out
@@ -26,7 +26,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0), null, substr(value, 5)) (type: string)
+              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0D), null, substr(value, 5)) (type: string)
               outputColumnNames: _col0, _col2
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
diff --git a/contrib/src/test/results/clientpositive/udaf_example_min.q.out b/contrib/src/test/results/clientpositive/udaf_example_min.q.out
index 4f3f8f86f0..75f33eb7af 100644
--- a/contrib/src/test/results/clientpositive/udaf_example_min.q.out
+++ b/contrib/src/test/results/clientpositive/udaf_example_min.q.out
@@ -33,7 +33,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0), null, substr(value, 5)) (type: string)
+              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) > 250.0D), null, substr(value, 5)) (type: string)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
diff --git a/contrib/src/test/results/clientpositive/udaf_example_min_n.q.out b/contrib/src/test/results/clientpositive/udaf_example_min_n.q.out
index 16c4684850..ae770c0749 100644
--- a/contrib/src/test/results/clientpositive/udaf_example_min_n.q.out
+++ b/contrib/src/test/results/clientpositive/udaf_example_min_n.q.out
@@ -26,7 +26,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) < 250.0), null, substr(value, 5)) (type: string)
+              expressions: substr(value, 5) (type: string), if((UDFToDouble(substr(value, 5)) < 250.0D), null, substr(value, 5)) (type: string)
               outputColumnNames: _col0, _col2
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
diff --git a/contrib/src/test/results/clientpositive/udf_example_add.q.out b/contrib/src/test/results/clientpositive/udf_example_add.q.out
index 7916679e5e..b092383804 100644
--- a/contrib/src/test/results/clientpositive/udf_example_add.q.out
+++ b/contrib/src/test/results/clientpositive/udf_example_add.q.out
@@ -36,7 +36,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double)
+              expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003D (type: double), 6.6D (type: double), 11.0D (type: double), 10.4D (type: double)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
               Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE
               Limit
diff --git a/hbase-handler/src/test/results/positive/hbase_ddl.q.out b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
index c015f29b90..ef3f5f704e 100644
--- a/hbase-handler/src/test/results/positive/hbase_ddl.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) % 2.0) = 0.0) (type: boolean)
+              predicate: ((UDFToDouble(key) % 2.0D) = 0.0D) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), value (type: string)
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index 0fce6c8e24..907c5dc7d4 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) % 2.0) = 0.0) (type: boolean)
+              predicate: ((UDFToDouble(key) % 2.0D) = 0.0D) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), value (type: string)
diff --git a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
index e719b087d4..0511c14e2f 100644
--- a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
@@ -173,7 +173,7 @@ STAGE PLANS:
             alias: hbase_table
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0)) and (time < 200000000000)) (type: boolean)
+              predicate: (((UDFToDouble(key) > 100.0D) and (UDFToDouble(key) < 400.0D)) and (time < 200000000000L)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -221,7 +221,7 @@ STAGE PLANS:
             alias: hbase_table
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0)) and (time > 100000000000)) (type: boolean)
+              predicate: (((UDFToDouble(key) > 100.0D) and (UDFToDouble(key) < 400.0D)) and (time > 100000000000L)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -271,7 +271,7 @@ STAGE PLANS:
             alias: hbase_table
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0)) and (time <= 100000000000)) (type: boolean)
+              predicate: (((UDFToDouble(key) > 100.0D) and (UDFToDouble(key) < 400.0D)) and (time <= 100000000000L)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -319,7 +319,7 @@ STAGE PLANS:
             alias: hbase_table
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0)) and (time >= 200000000000)) (type: boolean)
+              predicate: (((UDFToDouble(key) > 100.0D) and (UDFToDouble(key) < 400.0D)) and (time >= 200000000000L)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index a3725c5395..d0ff871df3 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -64,6 +64,7 @@
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -947,11 +948,18 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
     return null;
   }
 
+  @Override
+  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+      throws MetaException {
+    return null;
+  }
+
   @Override
   public List<String> createTableWithConstraints(Table tbl,
       List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
       List<SQLUniqueConstraint> uniqueConstraints,
-      List<SQLNotNullConstraint> notNullConstraints)
+      List<SQLNotNullConstraint> notNullConstraints,
+      List<SQLDefaultConstraint> defaultConstraints)
       throws InvalidObjectException, MetaException {
     return null;
   }
@@ -985,6 +993,12 @@ public void dropConstraint(String dbName, String tableName,
     return null;
   }
 
+  @Override
+  public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+      throws InvalidObjectException, MetaException {
+    return null;
+  }
+
   @Override
   public String getMetastoreDbUuid() throws MetaException {
     throw new MetaException("getMetastoreDbUuid is not implemented");
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 2776fe95f1..13af465c9a 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -532,6 +532,7 @@ minillaplocal.query.files=\
   correlationoptimizer2.q,\
   correlationoptimizer4.q,\
   correlationoptimizer6.q,\
+  default_constraint.q,\
   disable_merge_for_bucketing.q,\
   cross_prod_1.q,\
   cross_prod_3.q,\
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 45602a2bec..f7332a42fd 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -1713,7 +1713,8 @@ private void maskPatterns(Pattern[] patterns, String fname) throws Exception {
       "pk_-?[0-9]*_[0-9]*_[0-9]*",
       "fk_-?[0-9]*_[0-9]*_[0-9]*",
       "uk_-?[0-9]*_[0-9]*_[0-9]*",
-      "nn_-?[0-9]*_[0-9]*_[0-9]*",
+      "nn_-?[0-9]*_[0-9]*_[0-9]*", // not null constraint name
+      "dc_-?[0-9]*_[0-9]*_[0-9]*", // default constraint name
       ".*at com\\.sun\\.proxy.*",
       ".*at com\\.jolbox.*",
       ".*at com\\.zaxxer.*",
diff --git a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
index a8f227b775..e335b84515 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
@@ -110,7 +110,7 @@ CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHA
 CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
 
-CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
+CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, "DEFAULT_VALUE" VARCHAR(400));
 
 CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
diff --git a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
index 84d523e1d7..df36ff2aa1 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
@@ -935,6 +935,7 @@ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
   `UPDATE_RULE` string,
   `DELETE_RULE` string,
   `ENABLE_VALIDATE_RELY` int,
+  `DEFAULT_VALUE` string,
   CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`) DISABLE
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
@@ -953,7 +954,8 @@ TBLPROPERTIES (
   \"CONSTRAINT_TYPE\",
   \"UPDATE_RULE\",
   \"DELETE_RULE\",
-  \"ENABLE_VALIDATE_RELY\"
+  \"ENABLE_VALIDATE_RELY\",
+  \"DEFAULT_VALUE\"
 FROM \"KEY_CONSTRAINTS\""
 );
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index f99178dbc7..4fc0a93b61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -92,6 +92,7 @@
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -143,6 +144,7 @@
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.metadata.CheckResult;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -3617,11 +3619,13 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException,
     ForeignKeyInfo fkInfo = null;
     UniqueConstraint ukInfo = null;
     NotNullConstraint nnInfo = null;
+    DefaultConstraint dInfo = null;
     if (descTbl.isExt() || descTbl.isFormatted()) {
       pkInfo = db.getPrimaryKeys(tbl.getDbName(), tbl.getTableName());
       fkInfo = db.getForeignKeys(tbl.getDbName(), tbl.getTableName());
       ukInfo = db.getUniqueConstraints(tbl.getDbName(), tbl.getTableName());
       nnInfo = db.getNotNullConstraints(tbl.getDbName(), tbl.getTableName());
+      dInfo = db.getDefaultConstraints(tbl.getDbName(), tbl.getTableName());
     }
     fixDecimalColumnTypeName(cols);
     // In case the query is served by HiveServer2, don't pad it with spaces,
@@ -3630,7 +3634,7 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException,
     formatter.describeTable(outStream, colPath, tableName, tbl, part,
         cols, descTbl.isFormatted(), descTbl.isExt(),
         isOutputPadded, colStats,
-        pkInfo, fkInfo, ukInfo, nnInfo);
+        pkInfo, fkInfo, ukInfo, nnInfo, dInfo);
 
     LOG.debug("DDLTask: written data for {}", tableName);
 
@@ -4424,6 +4428,10 @@ private int addConstraints(Hive db, AlterTableDesc alterTbl)
           && !alterTbl.getNotNullConstraintCols().isEmpty()) {
         db.addNotNullConstraint(alterTbl.getNotNullConstraintCols());
       }
+      if (alterTbl.getDefaultConstraintCols() != null
+          && !alterTbl.getDefaultConstraintCols().isEmpty()) {
+        db.addDefaultConstraint(alterTbl.getDefaultConstraintCols());
+      }
     } catch (NoSuchObjectException e) {
       throw new HiveException(e);
     }
@@ -4746,6 +4754,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
     List<SQLForeignKey> foreignKeys = crtTbl.getForeignKeys();
     List<SQLUniqueConstraint> uniqueConstraints = crtTbl.getUniqueConstraints();
     List<SQLNotNullConstraint> notNullConstraints = crtTbl.getNotNullConstraints();
+    List<SQLDefaultConstraint> defaultConstraints = crtTbl.getDefaultConstraints();
     LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation());
 
     if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())){
@@ -4771,12 +4780,13 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
       // replace-mode creates are really alters using CreateTableDesc.
       db.alterTable(tbl, null);
     } else {
-      if ((foreignKeys != null && foreignKeys.size() > 0 ) ||
+      if ((foreignKeys != null && foreignKeys.size() > 0) ||
           (primaryKeys != null && primaryKeys.size() > 0) ||
           (uniqueConstraints != null && uniqueConstraints.size() > 0) ||
-          (notNullConstraints != null && notNullConstraints.size() > 0)) {
+          (notNullConstraints != null && notNullConstraints.size() > 0) ||
+          (defaultConstraints != null && defaultConstraints.size() > 0)) {
         db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys,
-            uniqueConstraints, notNullConstraints);
+            uniqueConstraints, notNullConstraints, defaultConstraints);
       } else {
         db.createTable(tbl, crtTbl.getIfNotExists());
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 32fc257b03..77e9263e0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -396,6 +396,18 @@
 system.registerUDF(serdeConstants.FLOAT_TYPE_NAME, UDFToFloat.class, false, UDFToFloat.class.getSimpleName());
 system.registerUDF(serdeConstants.DOUBLE_TYPE_NAME, UDFToDouble.class, false, UDFToDouble.class.getSimpleName());
 system.registerUDF(serdeConstants.STRING_TYPE_NAME, UDFToString.class, false, UDFToString.class.getSimpleName());
+// The following mappings resolve a UDF class simple name back to its UDF when the
+// expression for a default value is regenerated (in the operator tree). For example,
+// cast(4 as string) is serialized into the metastore as UDFToString(4); to rebuild the
+// appropriate UDF for UDFToString, we need these mappings.
+// The remaining types (e.g. DATE, CHAR, VARCHAR) are already registered.
+system.registerUDF(UDFToString.class.getSimpleName(), UDFToString.class, false, UDFToString.class.getSimpleName());
+system.registerUDF(UDFToBoolean.class.getSimpleName(), UDFToBoolean.class, false, UDFToBoolean.class.getSimpleName());
+system.registerUDF(UDFToDouble.class.getSimpleName(), UDFToDouble.class, false, UDFToDouble.class.getSimpleName());
+system.registerUDF(UDFToFloat.class.getSimpleName(), UDFToFloat.class, false, UDFToFloat.class.getSimpleName());
+system.registerUDF(UDFToInteger.class.getSimpleName(), UDFToInteger.class, false, UDFToInteger.class.getSimpleName());
+system.registerUDF(UDFToLong.class.getSimpleName(), UDFToLong.class, false, UDFToLong.class.getSimpleName());
+system.registerUDF(UDFToShort.class.getSimpleName(), UDFToShort.class, false, UDFToShort.class.getSimpleName());
+system.registerUDF(UDFToByte.class.getSimpleName(), UDFToByte.class, false, UDFToByte.class.getSimpleName());
 system.registerGenericUDF(serdeConstants.DATE_TYPE_NAME, GenericUDFToDate.class);
 system.registerGenericUDF(serdeConstants.TIMESTAMP_TYPE_NAME, GenericUDFTimestamp.class);
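The simple-name registrations above exist so that a default value serialized at CREATE TABLE time can be re-parsed at INSERT time. Below is a minimal sketch of that round trip, not part of the patch; it reuses only the APIs the patch itself calls (ParseDriver, TypeCheckProcFactory, ExprNodeDesc) and assumes an initialized Hive session on the classpath:

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.TypeCheckCtx;
import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

public class DefaultValueRoundTrip {
  public static void main(String[] args) throws Exception {
    ParseDriver pd = new ParseDriver();
    // CREATE TABLE time: "cast(4 as string)" is type-checked and then serialized
    // via getExprString(), which prints the UDF *class simple name*, e.g. "UDFToString(4)".
    ASTNode ast = pd.parseExpression("cast(4 as string)");
    ExprNodeDesc expr = TypeCheckProcFactory.genExprNode(ast, new TypeCheckCtx(null)).get(ast);
    String stored = expr.getExprString();
    // INSERT time: the stored text is parsed again; resolving "UDFToString" as a
    // function name only succeeds because of the registrations added above.
    ASTNode storedAst = pd.parseExpression(stored);
    ExprNodeDesc restored =
        TypeCheckProcFactory.genExprNode(storedAst, new TypeCheckCtx(null)).get(storedAst);
    System.out.println(stored + " -> " + restored.getTypeInfo().getTypeName());
  }
}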
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java
new file mode 100644
index 0000000000..59df3daf6e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+
+/**
+ * DefaultConstraint is a metadata structure containing the default constraints
+ * associated with a table.
+ */
+@SuppressWarnings("serial")
+public class DefaultConstraint implements Serializable {
+
+  public class DefaultConstraintCol {
+    public String colName;
+    public String defaultVal;
+
+    public DefaultConstraintCol(String colName, String defaultVal) {
+      this.colName = colName;
+      this.defaultVal = defaultVal;
+    }
+  }
+
+  // Mapping from constraint name to list of default constraint columns
+  Map<String, List<DefaultConstraintCol>> defaultConstraints;
+
+  // Mapping from column name to default value
+  Map<String, String> colNameToDefaultValueMap;
+  String tableName;
+  String databaseName;
+
+  public DefaultConstraint() {}
+
+  public DefaultConstraint(List<SQLDefaultConstraint> defaultConstraintList, String tableName, String databaseName) {
+    this.tableName = tableName;
+    this.databaseName = databaseName;
+    defaultConstraints = new TreeMap<String, List<DefaultConstraintCol>>();
+    colNameToDefaultValueMap = new TreeMap<String, String>();
+    if (defaultConstraintList == null) {
+      return;
+    }
+    for (SQLDefaultConstraint dc : defaultConstraintList) {
+      if (dc.getTable_db().equalsIgnoreCase(databaseName) &&
+          dc.getTable_name().equalsIgnoreCase(tableName)) {
+        String colName = dc.getColumn_name();
+        String defVal = dc.getDefault_value();
+        colNameToDefaultValueMap.put(colName, defVal);
+        DefaultConstraintCol currCol = new DefaultConstraintCol(colName, defVal);
+        String constraintName = dc.getDc_name();
+        if (defaultConstraints.containsKey(constraintName)) {
+          defaultConstraints.get(constraintName).add(currCol);
+        } else {
+          List<DefaultConstraintCol> currList = new ArrayList<DefaultConstraintCol>();
+          currList.add(currCol);
+          defaultConstraints.put(constraintName, currList);
+        }
+      }
+    }
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  public Map<String, List<DefaultConstraintCol>> getDefaultConstraints() {
+    return defaultConstraints;
+  }
+
+  public Map<String, String> getColNameToDefaultValueMap() {
+    return colNameToDefaultValueMap;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("Default Constraints for " + databaseName + "." + tableName + ":");
+    sb.append("[");
+    if (defaultConstraints != null && defaultConstraints.size() > 0) {
+      for (Map.Entry<String, List<DefaultConstraintCol>> me : defaultConstraints.entrySet()) {
+        sb.append(" {Constraint Name: " + me.getKey() + ",");
+        List<DefaultConstraintCol> currCol = me.getValue();
+        if (currCol != null && currCol.size() > 0) {
+          for (DefaultConstraintCol dcc : currCol) {
+            sb.append(" (Column Name: " + dcc.colName + ", Default Value: " + dcc.defaultVal + "),");
+          }
+          sb.setLength(sb.length() - 1);
+        }
+        sb.append("},");
+      }
+      sb.setLength(sb.length() - 1);
+    }
+    sb.append("]");
+    return sb.toString();
+  }
+}
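A short usage sketch for the class above, not part of the patch; it uses only the constructors defined in the new file and in the thrift-generated SQLDefaultConstraint (argument order follows constraintInfosToDefaultConstraints() later in this patch). Table, column, and constraint names are made up:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;

public class DefaultConstraintDemo {
  public static void main(String[] args) {
    // (db, table, column, default value, constraint name, enable, validate, rely)
    SQLDefaultConstraint dc = new SQLDefaultConstraint(
        "default", "t1", "quantity", "1", "dc_quantity", true, false, false);
    DefaultConstraint info = new DefaultConstraint(Arrays.asList(dc), "t1", "default");
    System.out.println(info);                               // toString() per the class above
    System.out.println(info.getColNameToDefaultValueMap()); // {quantity=1}
  }
}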
"".equals(tbl.getDbName().trim())) { @@ -877,11 +891,11 @@ public void createTable(Table tbl, boolean ifNotExists, } } if (primaryKeys == null && foreignKeys == null - && uniqueConstraints == null && notNullConstraints == null) { + && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null) { getMSC().createTable(tTbl); } else { getMSC().createTableWithConstraints(tTbl, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); } } catch (AlreadyExistsException e) { @@ -894,7 +908,7 @@ public void createTable(Table tbl, boolean ifNotExists, } public void createTable(Table tbl, boolean ifNotExists) throws HiveException { - createTable(tbl, ifNotExists, null, null, null, null); + createTable(tbl, ifNotExists, null, null, null, null, null); } public static List getFieldsFromDeserializerForMsStorage( @@ -4451,6 +4465,18 @@ public void dropConstraint(String dbName, String tableName, String constraintNam } } + public List getDefaultConstraintList(String dbName, String tblName) throws HiveException, NoSuchObjectException { + try { + return getMSC().getDefaultConstraints(new DefaultConstraintsRequest(dbName, tblName)); + } catch (NoSuchObjectException e) { + throw e; + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + /** * Get all primary key columns associated with the table. * @@ -4617,6 +4643,30 @@ public NotNullConstraint getEnabledNotNullConstraints(String dbName, String tblN } } + /** + * Get Default constraints associated with the table that are enabled + * + * @param dbName Database Name + * @param tblName Table Name + * @return Default constraints associated with the table. + * @throws HiveException + */ + public DefaultConstraint getEnabledDefaultConstraints(String dbName, String tblName) + throws HiveException { + try { + List defaultConstraints = getMSC().getDefaultConstraints( + new DefaultConstraintsRequest(dbName, tblName)); + if (defaultConstraints != null && !defaultConstraints.isEmpty()) { + defaultConstraints = defaultConstraints.stream() + .filter(nnc -> nnc.isEnable_cstr()) + .collect(Collectors.toList()); + } + return new DefaultConstraint(defaultConstraints, tblName, dbName); + } catch (Exception e) { + throw new HiveException(e); + } + } + private NotNullConstraint getNotNullConstraints(String dbName, String tblName, boolean onlyReliable) throws HiveException { try { @@ -4633,6 +4683,21 @@ private NotNullConstraint getNotNullConstraints(String dbName, String tblName, b } } + public DefaultConstraint getDefaultConstraints(String dbName, String tblName) + throws HiveException { + try { + List defaultConstraints = getMSC().getDefaultConstraints( + new DefaultConstraintsRequest(dbName, tblName)); + if (defaultConstraints != null && !defaultConstraints.isEmpty()) { + defaultConstraints = defaultConstraints.stream() + .collect(Collectors.toList()); + } + return new DefaultConstraint(defaultConstraints, tblName, dbName); + } catch (Exception e) { + throw new HiveException(e); + } + } + public void addPrimaryKey(List primaryKeyCols) throws HiveException, NoSuchObjectException { try { @@ -4669,6 +4734,16 @@ public void addNotNullConstraint(List notNullConstraintCol } } + public void addDefaultConstraint(List defaultConstraints) + throws HiveException, NoSuchObjectException { + try { + getMSC().addDefaultConstraint(defaultConstraints); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void createResourcePlan(WMResourcePlan resourcePlan, 
String copyFromName) throws HiveException { try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java index 77e5678f80..da82f68f73 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -114,7 +115,7 @@ public void describeTable(DataOutputStream out, String colPath, boolean isFormatted, boolean isExt, boolean isOutputPadded, List colStats, PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo, - UniqueConstraint ukInfo, NotNullConstraint nnInfo) throws HiveException { + UniqueConstraint ukInfo, NotNullConstraint nnInfo, DefaultConstraint dInfo) throws HiveException { MapBuilder builder = MapBuilder.create(); builder.put("columns", makeColsUnformatted(cols)); @@ -137,6 +138,9 @@ public void describeTable(DataOutputStream out, String colPath, if (nnInfo != null && !nnInfo.getNotNullConstraints().isEmpty()) { builder.put("notNullConstraintInfo", nnInfo); } + if (dInfo != null && !dInfo.getDefaultConstraints().isEmpty()) { + builder.put("defaultConstraintInfo", dInfo); + } } asJson(out, builder.build()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index a5b6a4b0c3..bfc7b38ceb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -41,15 +41,16 @@ import org.apache.hadoop.hive.metastore.api.WMPoolTrigger; import org.apache.hadoop.hive.metastore.api.WMResourcePlan; import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.UniqueConstraint; import org.apache.hadoop.hive.ql.metadata.UniqueConstraint.UniqueConstraintCol; import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo.ForeignKeyCol; -import org.apache.hadoop.hive.ql.metadata.NotNullConstraint; import org.apache.hadoop.hive.ql.plan.DescTableDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.serde2.io.DateWritable; @@ -133,7 +134,7 @@ static ColumnStatisticsObj getColumnStatisticsObject(String colName, } public static String getConstraintsInformation(PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo, - UniqueConstraint ukInfo, NotNullConstraint nnInfo) { + UniqueConstraint ukInfo, NotNullConstraint nnInfo, DefaultConstraint dInfo) { StringBuilder constraintsInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); 
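A small sketch of how a caller consumes the new Hive API above, mirroring what getColForInsertStmtSpec() in SemanticAnalyzer (further below) does; it is illustrative only, and the database/table/column names are hypothetical:

import java.util.Map;
import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
import org.apache.hadoop.hive.ql.metadata.Hive;

public class EnabledDefaultsLookup {
  // Returns the default-value expression string for a column, or null if the
  // column has no enabled DEFAULT constraint.
  static String defaultFor(String db, String table, String column) throws Exception {
    DefaultConstraint dc = Hive.get().getEnabledDefaultConstraints(db, table);
    Map<String, String> defaults = dc.getColNameToDefaultValueMap();
    return defaults.get(column);
  }
}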
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index 77e5678f80..da82f68f73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -114,7 +115,7 @@ public void describeTable(DataOutputStream out, String colPath,
       boolean isFormatted, boolean isExt,
       boolean isOutputPadded, List<ColumnStatisticsObj> colStats,
       PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo,
-      UniqueConstraint ukInfo, NotNullConstraint nnInfo) throws HiveException {
+      UniqueConstraint ukInfo, NotNullConstraint nnInfo, DefaultConstraint dInfo) throws HiveException {
     MapBuilder builder = MapBuilder.create();
     builder.put("columns", makeColsUnformatted(cols));
 
@@ -137,6 +138,9 @@ public void describeTable(DataOutputStream out, String colPath,
       if (nnInfo != null && !nnInfo.getNotNullConstraints().isEmpty()) {
         builder.put("notNullConstraintInfo", nnInfo);
       }
+      if (dInfo != null && !dInfo.getDefaultConstraints().isEmpty()) {
+        builder.put("defaultConstraintInfo", dInfo);
+      }
     }
 
     asJson(out, builder.build());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index a5b6a4b0c3..bfc7b38ceb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -41,15 +41,16 @@
 import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.UniqueConstraint;
 import org.apache.hadoop.hive.ql.metadata.UniqueConstraint.UniqueConstraintCol;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo.ForeignKeyCol;
-import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
 import org.apache.hadoop.hive.ql.plan.DescTableDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
@@ -133,7 +134,7 @@ static ColumnStatisticsObj getColumnStatisticsObject(String colName,
   }
 
   public static String getConstraintsInformation(PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo,
-      UniqueConstraint ukInfo, NotNullConstraint nnInfo) {
+      UniqueConstraint ukInfo, NotNullConstraint nnInfo, DefaultConstraint dInfo) {
     StringBuilder constraintsInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
 
     constraintsInfo.append(LINE_DELIM).append("# Constraints").append(LINE_DELIM);
@@ -153,6 +154,10 @@ public static String getConstraintsInformation(PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo,
       constraintsInfo.append(LINE_DELIM).append("# Not Null Constraints").append(LINE_DELIM);
       getNotNullConstraintsInformation(constraintsInfo, nnInfo);
     }
+    if (dInfo != null && !dInfo.getDefaultConstraints().isEmpty()) {
+      constraintsInfo.append(LINE_DELIM).append("# Default Constraints").append(LINE_DELIM);
+      getDefaultConstraintsInformation(constraintsInfo, dInfo);
+    }
     return constraintsInfo.toString();
   }
 
@@ -253,6 +258,40 @@ private static void getNotNullConstraintsInformation(StringBuilder constraintsInfo,
     }
   }
 
+  private static void getDefaultConstraintColInformation(StringBuilder constraintsInfo,
+      DefaultConstraint.DefaultConstraintCol dcCol) {
+    String[] dcFields = new String[2];
+    dcFields[0] = "Column Name:" + dcCol.colName;
+    dcFields[1] = "Default Value:" + dcCol.defaultVal;
+    formatOutput(dcFields, constraintsInfo);
+  }
+
+  private static void getDefaultConstraintRelInformation(
+      StringBuilder constraintsInfo,
+      String constraintName,
+      List<DefaultConstraint.DefaultConstraintCol> dcRel) {
+    formatOutput("Constraint Name:", constraintName, constraintsInfo);
+    if (dcRel != null && dcRel.size() > 0) {
+      for (DefaultConstraint.DefaultConstraintCol dcc : dcRel) {
+        getDefaultConstraintColInformation(constraintsInfo, dcc);
+      }
+    }
+    constraintsInfo.append(LINE_DELIM);
+  }
+
+  private static void getDefaultConstraintsInformation(StringBuilder constraintsInfo,
+      DefaultConstraint dInfo) {
+    formatOutput("Table:",
+        dInfo.getDatabaseName() + "." + dInfo.getTableName(),
+        constraintsInfo);
+    Map<String, List<DefaultConstraint.DefaultConstraintCol>> defaultConstraints = dInfo.getDefaultConstraints();
+    if (defaultConstraints != null && defaultConstraints.size() > 0) {
+      for (Map.Entry<String, List<DefaultConstraint.DefaultConstraintCol>> me : defaultConstraints.entrySet()) {
+        getDefaultConstraintRelInformation(constraintsInfo, me.getKey(), me.getValue());
+      }
+    }
+  }
+
   public static String getPartitionInformation(Partition part) {
     StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index 88d5554e1d..6309bfdc2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -89,7 +90,7 @@ public void describeTable(DataOutputStream out, String colPath,
       boolean isFormatted, boolean isExt,
       boolean isOutputPadded, List<ColumnStatisticsObj> colStats,
       PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo,
-      UniqueConstraint ukInfo, NotNullConstraint nnInfo)
+      UniqueConstraint ukInfo, NotNullConstraint nnInfo, DefaultConstraint dInfo)
       throws HiveException;
 
   /**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index 607e111c97..006584839a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -130,7 +131,7 @@ public void describeTable(DataOutputStream outStream, String colPath,
       boolean isFormatted, boolean isExt,
       boolean isOutputPadded, List<ColumnStatisticsObj> colStats,
       PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo,
-      UniqueConstraint ukInfo, NotNullConstraint nnInfo) throws HiveException {
+      UniqueConstraint ukInfo, NotNullConstraint nnInfo, DefaultConstraint dInfo) throws HiveException {
     try {
       List<FieldSchema> partCols = tbl.isPartitioned() ? tbl.getPartCols() : null;
       String output = "";
@@ -187,8 +188,9 @@ public void describeTable(DataOutputStream outStream, String colPath,
         if ((pkInfo != null && !pkInfo.getColNames().isEmpty()) ||
             (fkInfo != null && !fkInfo.getForeignKeys().isEmpty()) ||
             (ukInfo != null && !ukInfo.getUniqueConstraints().isEmpty()) ||
-            (nnInfo != null && !nnInfo.getNotNullConstraints().isEmpty())) {
-          output = MetaDataFormatUtils.getConstraintsInformation(pkInfo, fkInfo, ukInfo, nnInfo);
+            (nnInfo != null && !nnInfo.getNotNullConstraints().isEmpty()) ||
+            (dInfo != null && !dInfo.getDefaultConstraints().isEmpty())) {
+          output = MetaDataFormatUtils.getConstraintsInformation(pkInfo, fkInfo, ukInfo, nnInfo, dInfo);
           outStream.write(output.getBytes("UTF-8"));
         }
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 171825eb74..c3c029e89f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -58,6 +59,7 @@
 import org.apache.hadoop.hive.ql.cache.results.CacheUsage;
 import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -76,6 +78,7 @@
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
@@ -83,6 +86,10 @@
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCurrentDate;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCurrentTimestamp;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCurrentUser;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
@@ -643,7 +650,8 @@ private static String spliceString(String str, int i, int length, String replace) {
    */
  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase) throws SemanticException {
    return getColumns(ast, lowerCase, new ArrayList<SQLPrimaryKey>(), new ArrayList<SQLForeignKey>(),
-       new ArrayList<SQLUniqueConstraint>(), new ArrayList<SQLNotNullConstraint>());
+       new ArrayList<SQLUniqueConstraint>(), new ArrayList<SQLNotNullConstraint>(),
+       new ArrayList<SQLDefaultConstraint>());
  }
 
  private static class ConstraintInfo {
@@ -652,6 +660,7 @@ private static String spliceString(String str, int i, int length, String replace) {
    final boolean enable;
    final boolean validate;
    final boolean rely;
+   final String defaultValue;
 
    ConstraintInfo(String colName, String constraintName,
        boolean enable, boolean validate, boolean rely) {
@@ -660,6 +669,16 @@ private static String spliceString(String str, int i, int length, String replace) {
      this.enable = enable;
      this.validate = validate;
      this.rely = rely;
+     this.defaultValue = null;
+   }
+
+   ConstraintInfo(String colName, String constraintName,
+       boolean enable, boolean validate, boolean rely, String defaultValue) {
+     this.colName = colName;
+     this.constraintName = constraintName;
+     this.enable = enable;
+     this.validate = validate;
+     this.rely = rely;
+     this.defaultValue = defaultValue;
    }
  }
 
@@ -677,7 +696,7 @@ protected static void processPrimaryKeys(String databaseName, String tableName,
      ASTNode child, List<String> columnNames, List<SQLPrimaryKey> primaryKeys) throws SemanticException {
    List<ConstraintInfo> primaryKeyInfos = new ArrayList<ConstraintInfo>();
-   generateConstraintInfos(child, columnNames, primaryKeyInfos);
+   generateConstraintInfos(child, columnNames, primaryKeyInfos, null);
    constraintInfosToPrimaryKeys(databaseName, tableName, primaryKeyInfos, primaryKeys);
  }
 
@@ -705,7 +724,7 @@ protected static void processUniqueConstraints(String databaseName, String tableName,
      ASTNode child, List<String> columnNames, List<SQLUniqueConstraint> uniqueConstraints)
          throws SemanticException {
    List<ConstraintInfo> uniqueInfos = new ArrayList<ConstraintInfo>();
-   generateConstraintInfos(child, columnNames, uniqueInfos);
+   generateConstraintInfos(child, columnNames, uniqueInfos, null);
    constraintInfosToUniqueConstraints(databaseName, tableName, uniqueInfos, uniqueConstraints);
  }
 
@@ -718,11 +737,28 @@ private static void constraintInfosToUniqueConstraints(String databaseName, String tableName,
    }
  }
 
+ protected static void processDefaultConstraints(String databaseName, String tableName,
+     ASTNode child, List<String> columnNames, List<SQLDefaultConstraint> defaultConstraints,
+     final ASTNode typeChild) throws SemanticException {
+   List<ConstraintInfo> defaultInfos = new ArrayList<ConstraintInfo>();
+   generateConstraintInfos(child, columnNames, defaultInfos, typeChild);
+   constraintInfosToDefaultConstraints(databaseName, tableName, defaultInfos, defaultConstraints);
+ }
+
+ private static void constraintInfosToDefaultConstraints(String databaseName, String tableName,
+     List<ConstraintInfo> defaultInfos, List<SQLDefaultConstraint> defaultConstraints) {
+   for (ConstraintInfo defaultInfo : defaultInfos) {
+     defaultConstraints.add(new SQLDefaultConstraint(databaseName, tableName, defaultInfo.colName,
+         defaultInfo.defaultValue, defaultInfo.constraintName, defaultInfo.enable,
+         defaultInfo.validate, defaultInfo.rely));
+   }
+ }
+
  protected static void processNotNullConstraints(String databaseName, String tableName,
      ASTNode child, List<String> columnNames, List<SQLNotNullConstraint> notNullConstraints)
          throws SemanticException {
    List<ConstraintInfo> notNullInfos = new ArrayList<ConstraintInfo>();
-   generateConstraintInfos(child, columnNames, notNullInfos);
+   generateConstraintInfos(child, columnNames, notNullInfos, null);
    constraintInfosToNotNullConstraints(databaseName, tableName, notNullInfos, notNullConstraints);
  }
 
@@ -749,19 +785,87 @@ private static void generateConstraintInfos(ASTNode child,
      checkColumnName(columnName.getText());
      columnNames.add(unescapeIdentifier(columnName.getText().toLowerCase()));
    }
-   generateConstraintInfos(child, columnNames.build(), cstrInfos);
+   generateConstraintInfos(child, columnNames.build(), cstrInfos, null);
+ }
+
+ private static boolean isDefaultValueAllowed(final ExprNodeDesc defaultValExpr) {
+   if (defaultValExpr instanceof ExprNodeConstantDesc) {
+     return true;
+   } else if (FunctionRegistry.isOpCast(defaultValExpr)) {
+     return isDefaultValueAllowed(defaultValExpr.getChildren().get(0));
+   } else if (defaultValExpr instanceof ExprNodeGenericFuncDesc) {
+     ExprNodeGenericFuncDesc defFunc = (ExprNodeGenericFuncDesc) defaultValExpr;
+     if (defFunc.getGenericUDF() instanceof GenericUDFOPNull
+         || defFunc.getGenericUDF() instanceof GenericUDFCurrentTimestamp
+         || defFunc.getGenericUDF() instanceof GenericUDFCurrentDate
+         || defFunc.getGenericUDF() instanceof GenericUDFCurrentUser) {
+       return true;
+     }
+   }
+   return false;
+ }
+
+ /**
+  * Validate and get the default value from the AST.
+  * @param defaultValueAST AST node corresponding to the default value
+  * @return the default value as a string
+  * @throws SemanticException
+  */
+ private static String getDefaultValue(ASTNode defaultValueAST, ASTNode typeChild) throws SemanticException {
+   // first create an expression from defaultValueAST
+   TypeCheckCtx typeCheckCtx = new TypeCheckCtx(null);
+   ExprNodeDesc defaultValExpr = TypeCheckProcFactory
+       .genExprNode(defaultValueAST, typeCheckCtx).get(defaultValueAST);
+
+   if (defaultValExpr == null) {
+     throw new SemanticException(
+         ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid Default value!"));
+   }
+
+   // get the default value to be stored in the metastore
+   String defaultValueText = defaultValExpr.getExprString();
+   final int DEFAULT_MAX_LEN = 255;
+   if (defaultValueText.length() > DEFAULT_MAX_LEN) {
+     throw new SemanticException(
+         ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid Default value: " + defaultValueText
+             + ". Maximum character length allowed is " + DEFAULT_MAX_LEN + "."));
+   }
+
+   // Make sure the default value expression type is exactly the same as the column's type.
+   TypeInfo defaultValTypeInfo = defaultValExpr.getTypeInfo();
+   TypeInfo colTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(getTypeStringFromAST(typeChild));
+   if (!defaultValTypeInfo.equals(colTypeInfo)) {
+     throw new SemanticException(
+         ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid type: " + defaultValTypeInfo.getTypeName()
+             + " for default value: " + defaultValueText
+             + ". Please make sure that the type is compatible with column type: "
+             + colTypeInfo.getTypeName()));
+   }
+
+   // throw an error if the default value isn't one of the forms Hive allows
+   if (!isDefaultValueAllowed(defaultValExpr)) {
+     throw new SemanticException(
+         ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid Default value: " + defaultValueText
+             + ". DEFAULT only allows constant or function expressions"));
+   }
+   return defaultValueText;
+ }
+
  /**
   * Get the constraint from the AST and populate the cstrInfos with the required
   * information.
   * @param child The node with the constraint token
   * @param columnNames The name of the columns for the primary key
   * @param cstrInfos Constraint information
+  * @param typeChildForDefault type of column used for default value type check
   * @throws SemanticException
   */
  private static void generateConstraintInfos(ASTNode child, List<String> columnNames,
-     List<ConstraintInfo> cstrInfos) throws SemanticException {
+     List<ConstraintInfo> cstrInfos, ASTNode typeChildForDefault) throws SemanticException {
    // The ANTLR grammar looks like :
    // 1. KW_CONSTRAINT idfr=identifier KW_PRIMARY KW_KEY pkCols=columnParenthesesList
    //  constraintOptsCreate?
@@ -773,9 +877,12 @@ private static void generateConstraintInfos(ASTNode child, List<String> columnNames,
    // when the user does not specify the constraint name.
    // Default values
    String constraintName = null;
+   // if the user hasn't provided any optional constraint properties, the
+   // constraint is treated as ENABLE, NOVALIDATE and NORELY
    boolean enable = true;
-   boolean validate = true;
+   boolean validate = false;
    boolean rely = false;
+   String defaultValue = null;
    for (int i = 0; i < child.getChildCount(); i++) {
      ASTNode grandChild = (ASTNode) child.getChild(i);
      int type = grandChild.getToken().getType();
@@ -797,12 +904,25 @@ private static void generateConstraintInfos(ASTNode child, List<String> columnNames,
        validate = false;
      } else if (type == HiveParser.TOK_RELY) {
        rely = true;
+     } else if (type == HiveParser.TOK_NORELY) {
+       rely = false;
+     } else if (child.getToken().getType() == HiveParser.TOK_DEFAULT_VALUE) {
+       // try to get the default value only if this is a DEFAULT constraint
+       defaultValue = getDefaultValue(grandChild, typeChildForDefault);
      }
    }
 
+   // the metastore schema only allows a maximum of 255 characters for the constraint name column
+   final int CONSTRAINT_NAME_MAX_LENGTH = 255;
+   if (constraintName != null && constraintName.length() > CONSTRAINT_NAME_MAX_LENGTH) {
+     throw new SemanticException(
+         ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraint name: " + constraintName
+             + " exceeded maximum allowed length: " + CONSTRAINT_NAME_MAX_LENGTH));
+   }
+
    // NOT NULL constraint could be enforced/enabled
-   if (child.getToken().getType() != HiveParser.TOK_NOT_NULL
-       && enable) {
+   if (enable && child.getToken().getType() != HiveParser.TOK_NOT_NULL
+       && child.getToken().getType() != HiveParser.TOK_DEFAULT_VALUE) {
      throw new SemanticException(
          ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("ENABLE/ENFORCED feature not supported yet. "
              + "Please use DISABLE/NOT ENFORCED instead."));
@@ -815,13 +935,12 @@ private static void generateConstraintInfos(ASTNode child, List<String> columnNames,
 
    for (String columnName : columnNames) {
      cstrInfos.add(new ConstraintInfo(columnName, constraintName,
-         enable, validate, rely));
+         enable, validate, rely, defaultValue));
    }
  }
 
  /**
   * Process the foreign keys from the AST and populate the foreign keys in the SQLForeignKey list
-  * @param parent Parent of the foreign key token node
   * @param child Foreign Key token node
   * @param foreignKeys SQLForeignKey list
   * @throws SemanticException
@@ -906,7 +1025,8 @@ protected static void processForeignKeys(String databaseName, String tableName,
    }
  }
 
- protected boolean hasEnabledOrValidatedConstraints(List<SQLNotNullConstraint> notNullConstraints) {
+ protected boolean hasEnabledOrValidatedConstraints(List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints) {
    if (notNullConstraints != null) {
      for (SQLNotNullConstraint nnC : notNullConstraints) {
        if (nnC.isEnable_cstr() || nnC.isValidate_cstr()) {
@@ -914,6 +1034,13 @@ protected boolean hasEnabledOrValidatedConstraints(List<SQLNotNullConstraint> notNullConstraints,
        }
      }
    }
+   if (defaultConstraints != null) {
+     for (SQLDefaultConstraint dc : defaultConstraints) {
+       if (dc.isEnable_cstr() || dc.isValidate_cstr()) {
+         return true;
+       }
+     }
+   }
    return false;
  }
 
@@ -929,7 +1056,8 @@ private static void checkColumnName(String columnName) throws SemanticException {
   */
  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase,
      List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
-     List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints)
+     List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints)
      throws SemanticException {
    List<FieldSchema> colList = new ArrayList<FieldSchema>();
    Tree parent = ast.getParent();
@@ -987,6 +1115,10 @@ private static void checkColumnName(String columnName) throws SemanticException {
        String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
        // Process column constraint
        switch (constraintChild.getToken().getType()) {
+         case HiveParser.TOK_DEFAULT_VALUE:
+           processDefaultConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild,
+               ImmutableList.of(col.getName()), defaultConstraints, typeChild);
+           break;
          case HiveParser.TOK_NOT_NULL:
            processNotNullConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild,
                ImmutableList.of(col.getName()), notNullConstraints);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index e3934240e8..1793402471 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -86,6 +87,7 @@
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
+import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
@@ -1652,10 +1654,13 @@ private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView)
   private boolean hasConstraintsEnabled(final String tblName) throws SemanticException {
     NotNullConstraint nnc = null;
+    DefaultConstraint dc = null;
     try {
       // retrieve enabled NOT NULL constraint from metastore
       nnc = Hive.get().getEnabledNotNullConstraints(
           db.getDatabaseCurrent().getName(), tblName);
+      dc = Hive.get().getEnabledDefaultConstraints(
+          db.getDatabaseCurrent().getName(), tblName);
     } catch (Exception e) {
       if (e instanceof SemanticException) {
         throw (SemanticException) e;
@@ -1663,7 +1668,8 @@ private boolean hasConstraintsEnabled(final String tblName) throws SemanticException {
         throw (new RuntimeException(e));
       }
     }
-    if (nnc != null && !nnc.getNotNullConstraints().isEmpty()) {
+    if ((nnc != null && !nnc.getNotNullConstraints().isEmpty())
+        || (dc != null && !dc.getDefaultConstraints().isEmpty())) {
       return true;
     }
     return false;
@@ -3109,32 +3115,38 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
     List<SQLForeignKey> foreignKeys = null;
     List<SQLUniqueConstraint> uniqueConstraints = null;
     List<SQLNotNullConstraint> notNullConstraints = null;
+    List<SQLDefaultConstraint> defaultConstraints = null;
     if (constraintChild != null) {
       // Process column constraint
       switch (constraintChild.getToken().getType()) {
-      case HiveParser.TOK_NOT_NULL:
-        notNullConstraints = new ArrayList<>();
-        processNotNullConstraints(qualified[0], qualified[1], constraintChild,
-            ImmutableList.of(newColName), notNullConstraints);
-        break;
-      case HiveParser.TOK_UNIQUE:
-        uniqueConstraints = new ArrayList<>();
-        processUniqueConstraints(qualified[0], qualified[1], constraintChild,
-            ImmutableList.of(newColName), uniqueConstraints);
-        break;
-      case HiveParser.TOK_PRIMARY_KEY:
-        primaryKeys = new ArrayList<>();
-        processPrimaryKeys(qualified[0], qualified[1], constraintChild,
-            ImmutableList.of(newColName), primaryKeys);
-        break;
-      case HiveParser.TOK_FOREIGN_KEY:
-        foreignKeys = new ArrayList<>();
-        processForeignKeys(qualified[0], qualified[1], constraintChild,
-            foreignKeys);
-        break;
-      default:
-        throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg(
-            constraintChild.getToken().getText()));
+        case HiveParser.TOK_DEFAULT_VALUE:
+          defaultConstraints = new ArrayList<>();
+          processDefaultConstraints(qualified[0], qualified[1], constraintChild,
+              ImmutableList.of(newColName), defaultConstraints, (ASTNode) ast.getChild(2));
+          break;
+        case HiveParser.TOK_NOT_NULL:
+          notNullConstraints = new ArrayList<>();
+          processNotNullConstraints(qualified[0], qualified[1], constraintChild,
+              ImmutableList.of(newColName), notNullConstraints);
+          break;
+        case HiveParser.TOK_UNIQUE:
+          uniqueConstraints = new ArrayList<>();
+          processUniqueConstraints(qualified[0], qualified[1], constraintChild,
+              ImmutableList.of(newColName), uniqueConstraints);
+          break;
+        case HiveParser.TOK_PRIMARY_KEY:
+          primaryKeys = new ArrayList<>();
+          processPrimaryKeys(qualified[0], qualified[1], constraintChild,
+              ImmutableList.of(newColName), primaryKeys);
+          break;
+        case HiveParser.TOK_FOREIGN_KEY:
+          foreignKeys = new ArrayList<>();
+          processForeignKeys(qualified[0], qualified[1], constraintChild,
+              foreignKeys);
+          break;
+        default:
+          throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg(
+              constraintChild.getToken().getText()));
       }
     }
 
@@ -3142,7 +3154,7 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
     Table tab = getTable(qualified);
     if (tab.getTableType() == TableType.EXTERNAL_TABLE
-        && hasEnabledOrValidatedConstraints(notNullConstraints)) {
+        && hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints)) {
       throw new SemanticException(
           ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. "
               + "Only RELY is allowed."));
@@ -3159,7 +3171,7 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
     String tblName = getDotName(qualified);
     AlterTableDesc alterTblDesc;
     if (primaryKeys == null && foreignKeys == null
-        && uniqueConstraints == null && notNullConstraints == null) {
+        && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null) {
       alterTblDesc = new AlterTableDesc(tblName, partSpec,
           unescapeIdentifier(oldColName), unescapeIdentifier(newColName),
           newType, newComment, first, flagCol, isCascade);
@@ -3167,7 +3179,7 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
       alterTblDesc = new AlterTableDesc(tblName, partSpec,
           unescapeIdentifier(oldColName), unescapeIdentifier(newColName),
           newType, newComment, first, flagCol, isCascade,
-          primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
+          primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints);
     }
     addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 733ec79ce1..0c6aece1df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -109,6 +109,7 @@ TOK_NOT_NULL;
 TOK_UNIQUE;
 TOK_PRIMARY_KEY;
 TOK_FOREIGN_KEY;
+TOK_DEFAULT_VALUE;
 TOK_VALIDATE;
 TOK_NOVALIDATE;
 TOK_RELY;
@@ -2371,8 +2372,8 @@ colConstraint
 @after { popMsg(state); }
     : (KW_CONSTRAINT constraintName=identifier)? columnConstraintType constraintOptsCreate?
     -> {$constraintName.tree != null}?
-            ^(columnConstraintType ^(TOK_CONSTRAINT_NAME $constraintName) constraintOptsCreate?)
-    -> ^(columnConstraintType constraintOptsCreate?)
+            ^({$columnConstraintType.tree} ^(TOK_CONSTRAINT_NAME $constraintName) constraintOptsCreate?)
+    -> ^({$columnConstraintType.tree} constraintOptsCreate?)
     ;
 
 alterColumnConstraint[CommonTree fkColName]
@@ -2396,15 +2397,22 @@ alterColConstraint
 @after { popMsg(state); }
     : (KW_CONSTRAINT constraintName=identifier)? columnConstraintType constraintOptsAlter?
     -> {$constraintName.tree != null}?
-            ^(columnConstraintType ^(TOK_CONSTRAINT_NAME $constraintName) constraintOptsAlter?)
-    -> ^(columnConstraintType constraintOptsAlter?)
+            ^({$columnConstraintType.tree} ^(TOK_CONSTRAINT_NAME $constraintName) constraintOptsAlter?)
+    -> ^({$columnConstraintType.tree} constraintOptsAlter?)
     ;
 
 columnConstraintType
     : KW_NOT KW_NULL -> TOK_NOT_NULL
+    | KW_DEFAULT defaultVal -> ^(TOK_DEFAULT_VALUE defaultVal)
    | tableConstraintType
    ;
 
+defaultVal
+    : constant
+    | function
+    | castExpression
+    ;
+
 tableConstraintType
    : KW_PRIMARY KW_KEY -> TOK_PRIMARY_KEY
    | KW_UNIQUE -> TOK_UNIQUE
; columnConstraintType : KW_NOT KW_NULL -> TOK_NOT_NULL + | KW_DEFAULT defaultVal-> ^(TOK_DEFAULT_VALUE defaultVal) | tableConstraintType ; +defaultVal + : constant + | function + | castExpression + ; + tableConstraintType : KW_PRIMARY KW_KEY -> TOK_PRIMARY_KEY | KW_UNIQUE -> TOK_UNIQUE diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index cd6f1ee692..82f41a0da0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -131,6 +132,7 @@ import org.apache.hadoop.hive.ql.lib.GraphWalker; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.DummyPartition; @@ -4353,6 +4355,70 @@ static boolean isRegex(String pattern, HiveConf conf) { return output; } + private RowResolver getColForInsertStmtSpec(Map targetCol2Projection, final Table target, + Map targetCol2ColumnInfo, int colListPos, + List targetTableColTypes, ArrayList new_col_list, + List targetTableColNames) + throws SemanticException { + RowResolver newOutputRR = new RowResolver(); + Map colNameToDefaultVal = null; + + // see if we need to fetch default constraints from metastore + if(targetCol2Projection.size() < targetTableColNames.size()) { + try { + DefaultConstraint dc = Hive.get().getEnabledDefaultConstraints(target.getDbName(), target.getTableName()); + colNameToDefaultVal = dc.getColNameToDefaultValueMap(); + } catch (Exception e) { + if (e instanceof SemanticException) { + throw (SemanticException) e; + } else { + throw (new RuntimeException(e)); + } + } + + } + boolean defaultConstraintsFetch = true; + for (int i = 0; i < targetTableColNames.size(); i++) { + String f = targetTableColNames.get(i); + if(targetCol2Projection.containsKey(f)) { + //put existing column in new list to make sure it is in the right position + new_col_list.add(targetCol2Projection.get(f)); + ColumnInfo ci = targetCol2ColumnInfo.get(f); + ci.setInternalName(getColumnInternalName(colListPos)); + newOutputRR.put(ci.getTabAlias(), ci.getInternalName(), ci); + } + else { + //add new 'synthetic' columns for projections not provided by Select + assert(colNameToDefaultVal != null); + ExprNodeDesc exp = null; + if(colNameToDefaultVal.containsKey(f)) { + // make an expression for default value + String defaultValue = colNameToDefaultVal.get(f); + ParseDriver parseDriver = new ParseDriver(); + try { + ASTNode defValAst = parseDriver.parseExpression(defaultValue); + + exp = TypeCheckProcFactory.genExprNode(defValAst, new TypeCheckCtx(null)).get(defValAst); + } catch(Exception e) { + throw new SemanticException("Error while parsing default value: " + defaultValue + + ". 
Error message: " + e.getMessage()); + } + LOG.debug("Added default value from metastore: " + exp); + } + else { + exp = new ExprNodeConstantDesc(targetTableColTypes.get(i), null); + } + new_col_list.add(exp); + final String tableAlias = null;//this column doesn't come from any table + ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(colListPos), + exp.getWritableObjectInspector(), tableAlias, false); + newOutputRR.put(colInfo.getTabAlias(), colInfo.getInternalName(), colInfo); + } + colListPos++; + } + return newOutputRR; + } + /** * This modifies the Select projections when the Select is part of an insert statement and * the insert statement specifies a column list for the target table, e.g. @@ -4421,29 +4487,12 @@ public RowResolver handleInsertStatementSpec(List col_list, String } } } - RowResolver newOutputRR = new RowResolver(); + //now make the select produce , with //where missing columns are NULL-filled - for (int i = 0; i < targetTableColNames.size(); i++) { - String f = targetTableColNames.get(i); - if(targetCol2Projection.containsKey(f)) { - //put existing column in new list to make sure it is in the right position - new_col_list.add(targetCol2Projection.get(f)); - ColumnInfo ci = targetCol2ColumnInfo.get(f);//todo: is this OK? - ci.setInternalName(getColumnInternalName(colListPos)); - newOutputRR.put(ci.getTabAlias(), ci.getInternalName(), ci); - } - else { - //add new 'synthetic' columns for projections not provided by Select - ExprNodeDesc exp = new ExprNodeConstantDesc(targetTableColTypes.get(i), null); - new_col_list.add(exp); - final String tableAlias = null;//this column doesn't come from any table - ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(colListPos), - exp.getWritableObjectInspector(), tableAlias, false); - newOutputRR.put(colInfo.getTabAlias(), colInfo.getInternalName(), colInfo); - } - colListPos++; - } + Table tbl = target == null? partition.getTable() : target; + RowResolver newOutputRR = getColForInsertStmtSpec(targetCol2Projection, tbl, targetCol2ColumnInfo, colListPos, + targetTableColTypes, new_col_list, targetTableColNames); col_list.clear(); col_list.addAll(new_col_list); return newOutputRR; @@ -12418,6 +12467,29 @@ private void validate(Task task, boolean reworkMapredWor return retValue; } + /** + * Checks to see if given partition columns has constraints (wheather enabled or disabled) + * @param partCols partition columns + * @param defConstraints default constraints + * @param notNullConstraints not null constraints + * @return + */ + boolean hasConstraints(final List partCols, final List defConstraints, + final List notNullConstraints) { + for(FieldSchema partFS: partCols) { + for(SQLDefaultConstraint dc:defConstraints) { + if(dc.getColumn_name().equals(partFS.getName())) { + return true; + } + } + for(SQLNotNullConstraint nc:notNullConstraints) { + if(nc.getColumn_name().equals(partFS.getName())) { + return true; + } + } + } + return false; + } /** * Analyze the create table command. If it is a regular create-table or * create-table-like statements, we create a DDLWork and return true. 
If it is @@ -12439,6 +12511,7 @@ ASTNode analyzeCreateTable( List foreignKeys = new ArrayList(); List uniqueConstraints = new ArrayList<>(); List notNullConstraints = new ArrayList<>(); + List defaultConstraints= new ArrayList<>(); List sortCols = new ArrayList(); int numBuckets = -1; String comment = null; @@ -12532,14 +12605,20 @@ ASTNode analyzeCreateTable( break; case HiveParser.TOK_TABCOLLIST: cols = getColumns(child, true, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); break; case HiveParser.TOK_TABLECOMMENT: comment = unescapeSQLString(child.getChild(0).getText()); break; case HiveParser.TOK_TABLEPARTCOLS: partCols = getColumns(child, false, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); + if(hasConstraints(partCols, defaultConstraints, notNullConstraints)) { + //TODO: these constraints should be supported for partition columns + throw new SemanticException( + ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("NOT NULL and Default Constraints are not allowed with " + + "partition columns. ")); + } break; case HiveParser.TOK_ALTERTABLE_BUCKETS: bucketCols = getColumnNames((ASTNode) child.getChild(0)); @@ -12597,7 +12676,7 @@ ASTNode analyzeCreateTable( throw new SemanticException("Unrecognized command."); } - if(isExt && hasEnabledOrValidatedConstraints(notNullConstraints)){ + if(isExt && hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints)){ throw new SemanticException( ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. " + "Only RELY is allowed.")); @@ -12655,7 +12734,7 @@ ASTNode analyzeCreateTable( comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, - skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints); crtTblDesc.setStoredAsSubDirectories(storedAsDirs); crtTblDesc.setNullFormat(rowFormatParams.nullFormat); @@ -12754,7 +12833,7 @@ ASTNode analyzeCreateTable( storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, true, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); tableDesc.setMaterialization(isMaterialization); tableDesc.setStoredAsSubDirectories(storedAsDirs); tableDesc.setNullFormat(rowFormatParams.nullFormat); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java index 1a7e7e3354..3e7b3a1d6a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java @@ -638,6 +638,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, ASTNode expr = (ASTNode) nd; ASTNode parent = stack.size() > 1 ? 
(ASTNode) stack.get(stack.size() - 2) : null; RowResolver input = ctx.getInputRR(); + if(input == null) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr), expr); + return null; + } if (expr.getType() != HiveParser.TOK_TABLE_OR_COL) { ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr), expr); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java index 9c12e7e2af..3425858cbc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -64,7 +65,7 @@ } AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), new ArrayList(), - new ArrayList(), nns, context.eventOnlyReplicationSpec()); + new ArrayList(), nns, new ArrayList(), context.eventOnlyReplicationSpec()); Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 00c0381107..bf86cec488 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -36,7 +37,6 @@ import java.io.Serializable; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -132,6 +132,7 @@ List foreignKeyCols; List uniqueConstraintCols; List notNullConstraintCols; + List defaultConstraintsCols; ReplicationSpec replicationSpec; public AlterTableDesc() { @@ -167,7 +168,7 @@ public AlterTableDesc(String tblName, HashMap partSpec, String oldColName, String newColName, String newType, String newComment, boolean first, String afterCol, boolean isCascade, List primaryKeyCols, List foreignKeyCols, List uniqueConstraintCols, - List notNullConstraintCols) { + List notNullConstraintCols, List defaultConstraints) { super(); oldName = tblName; this.partSpec = partSpec; @@ -183,6 +184,7 @@ public AlterTableDesc(String tblName, HashMap partSpec, this.foreignKeyCols = foreignKeyCols; this.uniqueConstraintCols = uniqueConstraintCols; this.notNullConstraintCols = notNullConstraintCols; + this.defaultConstraintsCols = defaultConstraints; } /** @@ -342,12 +344,14 @@ public AlterTableDesc(String tableName, List primaryKeyCols, public AlterTableDesc(String tableName, List primaryKeyCols, List 
foreignKeyCols, List uniqueConstraintCols, - List notNullConstraintCols, ReplicationSpec replicationSpec) { + List notNullConstraintCols, List defaultConstraints, + ReplicationSpec replicationSpec) { this.oldName = tableName; this.primaryKeyCols = primaryKeyCols; this.foreignKeyCols = foreignKeyCols; this.uniqueConstraintCols = uniqueConstraintCols; this.notNullConstraintCols = notNullConstraintCols; + this.defaultConstraintsCols = defaultConstraints; this.replicationSpec = replicationSpec; op = AlterTableTypes.ADDCONSTRAINT; } @@ -540,6 +544,13 @@ public void setForeignKeyCols(List foreignKeyCols) { return notNullConstraintCols; } + /** + * @return the default constraint cols + */ + public List getDefaultConstraintCols() { + return defaultConstraintsCols; + } + /** * @return the drop constraint name of the table */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 6228d4c803..ff9df3d87a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -100,6 +101,7 @@ List foreignKeys; List uniqueConstraints; List notNullConstraints; + List defaultConstraints; private Long initialMmWriteId; // Initial MM write ID for CTAS and import. // The FSOP configuration for the FSOP that is going to write initial data during ctas. // This is not needed beyond compilation, so it is transient. 
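
The descriptor changes here are mechanical plumbing: AlterTableDesc and CreateTableDesc each grow a defaultConstraints list that rides along with the existing primary-key, foreign-key, unique, and not-null lists. A small sketch of the pattern, with a hypothetical DescSketch class and plain strings in place of the Thrift-generated SQLDefaultConstraint; the copyList body is not shown in this hunk, so the sketch assumes a null-preserving defensive copy:

    import java.util.ArrayList;
    import java.util.List;

    final class DescSketch {
      private final List<String> notNullConstraints;
      private final List<String> defaultConstraints;

      DescSketch(List<String> notNull, List<String> defaults) {
        // defensively copy so later mutation by the caller cannot
        // change the compiled plan (assumed copyList semantics)
        this.notNullConstraints = copyList(notNull);
        this.defaultConstraints = copyList(defaults);
      }

      private static <T> List<T> copyList(List<T> src) {
        return src == null ? null : new ArrayList<>(src);
      }

      List<String> getNotNullConstraints() {
        return notNullConstraints;
      }

      List<String> getDefaultConstraints() {
        return defaultConstraints;
      }
    }
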
@@ -119,14 +121,15 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal Map tblProps, boolean ifNotExists, List skewedColNames, List> skewedColValues, List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints) { + List uniqueConstraints, List notNullConstraints, + List defaultConstraints) { this(tableName, isExternal, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serName, storageHandler, serdeProps, tblProps, ifNotExists, skewedColNames, skewedColValues, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints); this.databaseName = databaseName; } @@ -142,13 +145,14 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal Map tblProps, boolean ifNotExists, List skewedColNames, List> skewedColValues, boolean isCTAS, List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints) { + List uniqueConstraints, List notNullConstraints, + List defaultConstraints) { this(databaseName, tableName, isExternal, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serName, storageHandler, serdeProps, tblProps, ifNotExists, skewedColNames, skewedColValues, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints); this.isCTAS = isCTAS; } @@ -165,7 +169,8 @@ public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary Map tblProps, boolean ifNotExists, List skewedColNames, List> skewedColValues, List primaryKeys, List foreignKeys, - List uniqueConstraints, List notNullConstraints) { + List uniqueConstraints, List notNullConstraints, + List defaultConstraints) { this.tableName = tableName; this.isExternal = isExternal; this.isTemporary = isTemporary; @@ -194,6 +199,7 @@ public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary this.foreignKeys = copyList(foreignKeys); this.uniqueConstraints = copyList(uniqueConstraints); this.notNullConstraints = copyList(notNullConstraints); + this.defaultConstraints = copyList(defaultConstraints); } private static List copyList(List copy) { @@ -272,6 +278,10 @@ public void setForeignKeys(ArrayList foreignKeys) { return notNullConstraints; } + public List getDefaultConstraints() { + return defaultConstraints; + } + @Explain(displayName = "bucket columns") public List getBucketCols() { return bucketCols; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java index 73f449fc28..00aff8e7eb 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java @@ -116,11 +116,36 @@ private static String getFormatted(TypeInfo typeInfo, Object value) { hexChars[j * 2 + 1] = hexArray[v & 0x0F]; } return new String(hexChars); + } else if(typeInfo.getTypeName().equals(serdeConstants.DATE_TYPE_NAME)) { + return "DATE'" + value.toString() + "'"; + } else if(typeInfo.getTypeName().equals(serdeConstants.TIMESTAMP_TYPE_NAME)) { + return "TIMESTAMP'" + value.toString() + "'"; + } else 
if(typeInfo.getTypeName().equals(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME)) { + return "TIMESTAMPLOCALTZ'" + value.toString() + "'"; + } else if(typeInfo.getTypeName().equals(serdeConstants.TINYINT_TYPE_NAME)) { + return value.toString() + "Y"; + } else if(typeInfo.getTypeName().equals(serdeConstants.SMALLINT_TYPE_NAME)) { + return value.toString() + "S"; + } else if(typeInfo.getTypeName().equals(serdeConstants.BIGINT_TYPE_NAME)) { + return value.toString() + "L"; + } else if(typeInfo.getTypeName().equals(serdeConstants.DOUBLE_TYPE_NAME)) { + return value.toString() + "D"; + } else if(typeInfo.getTypeName().equals(serdeConstants.DECIMAL_TYPE_NAME)) { + return value.toString() + "BD"; + } else if(typeInfo.getTypeName().equals(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME) + || typeInfo.getTypeName().equals(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME)) { + return "INTERVAL'" + value.toString() + "'"; } return value.toString(); } @Override + /** + * Returns the string representation of the constant expression. + * Besides the explain plan, the DEFAULT constraint feature also uses it to serialize the constant expression + * for storage in the metastore, where the string is later reparsed to regenerate the constant expression. + * Therefore it is necessary for this method to qualify literals such as intervals with the appropriate type qualifiers. + */ public String getExprString() { if (typeInfo.getCategory() == Category.PRIMITIVE) { return getFormatted(typeInfo, value); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index fcbac7d840..a4e21c17a0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -82,7 +82,8 @@ public ImportTableDesc(String dbName, Table table) throws Exception { null, null, null, - null); + null, + null); this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); break; case VIEW: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToChar.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToChar.java index b98ec68158..899abf76b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToChar.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToChar.java @@ -93,6 +93,7 @@ public String getDisplayString(String[] children) { sb.append(" AS CHAR("); sb.append("" + typeInfo.getLength()); sb.append(")"); + sb.append(")"); return sb.toString(); } diff --git a/ql/src/test/queries/clientnegative/alter_external_with_default_constraint.q b/ql/src/test/queries/clientnegative/alter_external_with_default_constraint.q new file mode 100644 index 0000000000..5169f95b6d --- /dev/null +++ b/ql/src/test/queries/clientnegative/alter_external_with_default_constraint.q @@ -0,0 +1,3 @@ +CREATE external TABLE table1 (a STRING, b STRING); +Alter table table1 change b b STRING DEFAULT 'hive' enable; + diff --git a/ql/src/test/queries/clientnegative/alter_external_with_constraint.q b/ql/src/test/queries/clientnegative/alter_external_with_notnull_constraint.q similarity index 100% rename from ql/src/test/queries/clientnegative/alter_external_with_constraint.q rename to ql/src/test/queries/clientnegative/alter_external_with_notnull_constraint.q diff --git a/ql/src/test/queries/clientnegative/alter_tableprops_external_with_default_constraint.q b/ql/src/test/queries/clientnegative/alter_tableprops_external_with_default_constraint.q new file mode 100644 index
0000000000..c93ceba7e9 --- /dev/null +++ b/ql/src/test/queries/clientnegative/alter_tableprops_external_with_default_constraint.q @@ -0,0 +1,3 @@ +CREATE TABLE table1 (a STRING DEFAULT 'hive', b STRING); +Alter table table1 set TBLPROPERTIES('external'='true'); + diff --git a/ql/src/test/queries/clientnegative/alter_tableprops_external_with_constraint.q b/ql/src/test/queries/clientnegative/alter_tableprops_external_with_notnull_constraint.q similarity index 100% rename from ql/src/test/queries/clientnegative/alter_tableprops_external_with_constraint.q rename to ql/src/test/queries/clientnegative/alter_tableprops_external_with_notnull_constraint.q diff --git a/ql/src/test/queries/clientnegative/constraint_duplicate_name.q b/ql/src/test/queries/clientnegative/constraint_duplicate_name.q new file mode 100644 index 0000000000..2b7429dc80 --- /dev/null +++ b/ql/src/test/queries/clientnegative/constraint_duplicate_name.q @@ -0,0 +1,2 @@ +create table t(i int constraint c1 not null enable); +create table t1(j int constraint c1 default 4); diff --git a/ql/src/test/queries/clientnegative/constraint_invalide_name.q b/ql/src/test/queries/clientnegative/constraint_invalide_name.q new file mode 100644 index 0000000000..a354d57032 --- /dev/null +++ b/ql/src/test/queries/clientnegative/constraint_invalide_name.q @@ -0,0 +1,3 @@ +-- max allowed length for constraint name is 255 +create table t (i int, j string constraint aaaaabcdatyaaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaa + default 'def_value'); diff --git a/ql/src/test/queries/clientnegative/constraint_partition_columns.q b/ql/src/test/queries/clientnegative/constraint_partition_columns.q new file mode 100644 index 0000000000..81e74ff122 --- /dev/null +++ b/ql/src/test/queries/clientnegative/constraint_partition_columns.q @@ -0,0 +1,2 @@ +-- partition columns aren't allowed to have not null or default constraints +create table tpart(i int default 5, j int not null enable) partitioned by (s string not null); diff --git a/ql/src/test/queries/clientnegative/create_external_with_default_constraint.q b/ql/src/test/queries/clientnegative/create_external_with_default_constraint.q new file mode 100644 index 0000000000..4690c2cb0b --- /dev/null +++ b/ql/src/test/queries/clientnegative/create_external_with_default_constraint.q @@ -0,0 +1 @@ +CREATE external TABLE table1 (a INT DEFAULT 56, b STRING); diff --git a/ql/src/test/queries/clientnegative/create_external_with_constraint.q b/ql/src/test/queries/clientnegative/create_external_with_notnull_constraint.q similarity index 100% rename from ql/src/test/queries/clientnegative/create_external_with_constraint.q rename to ql/src/test/queries/clientnegative/create_external_with_notnull_constraint.q diff --git a/ql/src/test/queries/clientnegative/default_constraint_complex_default_value.q b/ql/src/test/queries/clientnegative/default_constraint_complex_default_value.q new file mode 100644 index 0000000000..76c32aab12 --- /dev/null +++ b/ql/src/test/queries/clientnegative/default_constraint_complex_default_value.q @@ -0,0 +1,2 @@ +-- default for complex types are not allowed +create table t (i int, j array default array(1.3, 2.3)); diff --git a/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value.q b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value.q new file mode 100644 
index 0000000000..ff00eceb25 --- /dev/null +++ b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value.q @@ -0,0 +1,2 @@ +-- reference to other column in default value is not allowed +create table t (i int, j double default cast(i as double)); diff --git a/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value2.q b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value2.q new file mode 100644 index 0000000000..ec5b67ae0f --- /dev/null +++ b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value2.q @@ -0,0 +1,2 @@ +-- only certain UDFs are allowed as default +create table t (i int, j string default repeat('s', 4)); diff --git a/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value_length.q b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value_length.q new file mode 100644 index 0000000000..a2a49ff6b1 --- /dev/null +++ b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value_length.q @@ -0,0 +1,4 @@ +-- max allowed length for default value is 255 +-- create default with length 256 +create table t (i int, j string default + '12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234'); diff --git a/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value_type.q b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value_type.q new file mode 100644 index 0000000000..1f1a9db846 --- /dev/null +++ b/ql/src/test/queries/clientnegative/default_constraint_invalid_default_value_type.q @@ -0,0 +1,2 @@ +-- year() isn't valid +create table t (i int, j string default cast(year("1970-01-01") as string)); diff --git a/ql/src/test/queries/clientnegative/default_constraint_invalid_type.q b/ql/src/test/queries/clientnegative/default_constraint_invalid_type.q new file mode 100644 index 0000000000..d1afb684a7 --- /dev/null +++ b/ql/src/test/queries/clientnegative/default_constraint_invalid_type.q @@ -0,0 +1 @@ +create table t (i int, j double default 1); diff --git a/ql/src/test/queries/clientpositive/create_with_constraints.q b/ql/src/test/queries/clientpositive/create_with_constraints.q index 7b2594b79f..1152294165 100644 --- a/ql/src/test/queries/clientpositive/create_with_constraints.q +++ b/ql/src/test/queries/clientpositive/create_with_constraints.q @@ -24,6 +24,7 @@ CREATE TABLE table20 (a STRING, b STRING, CONSTRAINT uk20_1 UNIQUE (b) DISABLE R CREATE TABLE table21 (a STRING, CONSTRAINT uk21_1 UNIQUE (a,b) DISABLE) PARTITIONED BY (b STRING); CREATE TABLE table22 (a STRING, b STRING, CONSTRAINT fk22_1 FOREIGN KEY (a,b) REFERENCES table21(a,b) DISABLE); + DESCRIBE EXTENDED table1; DESCRIBE EXTENDED table2; DESCRIBE EXTENDED table3; diff --git a/ql/src/test/queries/clientpositive/default_constraint.q b/ql/src/test/queries/clientpositive/default_constraint.q new file mode 100644 index 0000000000..c16dda329b --- /dev/null +++ b/ql/src/test/queries/clientpositive/default_constraint.q @@ -0,0 +1,180 @@ +-- create table + -- numeric type + set hive.stats.autogather=false; + set hive.support.concurrency=true; + set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT 
DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); +DESC FORMATTED numericDataType; + +EXPLAIN INSERT INTO numericDataType(a) values(3Y); +INSERT INTO numericDataType(a) values(3Y); +SELECT * FROM numericDataType; + +EXPLAIN INSERT INTO numericDataType(e,f) values(4.5, 678.4); +INSERT INTO numericDataType(e,f) values(4.5, 678.4); +SELECT * FROM numericDataType; + +DROP TABLE numericDataType; + + -- Date/time +CREATE TABLE table1(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000', + tz timestamp with local time zone DEFAULT TIMESTAMPLOCALTZ'2016-01-03 12:26:34 America/Los_Angeles', + d1 DATE DEFAULT current_date() ENABLE, t1 TIMESTAMP DEFAULT current_timestamp() DISABLE); +DESC FORMATTED table1; + +EXPLAIN INSERT INTO table1(t) values ("1985-12-31 12:45:07"); +INSERT INTO table1(t) values ("1985-12-31 12:45:07"); +SELECT d, t, tz,d1=current_date(), t1 from table1; + +EXPLAIN INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259'); +INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259'); +SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1; + +DROP TABLE table1; + +-- string type +CREATE TABLE table2(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(), + k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT cast('varchar_default_value' as varchar(350)), + c char(20) DEFAULT cast('char_value' as char(20))) + clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); +DESC FORMATTED table2; +EXPLAIN INSERT INTO table2(i) values('default'); +INSERT INTO table2(i) values('default'); +SELECT i,j=current_user(),k,v,c FROM table2; + +EXPLAIN INSERT INTO table2(v, c) values('varchar_default2', 'char'); +INSERT INTO table2(v, c) values('varchar_default2', 'char'); +SELECT i,j=current_user(),k,v,c FROM table2; +DROP TABLE table2; + + +-- misc type +CREATE TABLE misc(b BOOLEAN DEFAULT true, b1 BINARY DEFAULT cast('bin' as binary)) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); +DESC FORMATTED misc; +EXPLAIN INSERT INTO misc(b) values(false); +INSERT INTO misc(b) values(false); +SELECT b, b1 from misc; +EXPLAIN INSERT INTO misc(b1) values('011'); +INSERT INTO misc(b) values(false); +SELECT b, b1 from misc; +DROP TABLE misc; + +-- CAST +CREATE table t11(i int default cast(cast(4 as double) as int), + b1 boolean default cast ('true' as boolean), b2 int default cast (5.67 as int), + b3 tinyint default cast (45 as tinyint), b4 float default cast (45.4 as float), + b5 bigint default cast (567 as bigint), b6 smallint default cast (88 as smallint), + j varchar(50) default cast(current_timestamp() as varchar(50)), + k string default cast(cast(current_user() as varchar(50)) as string), + tz1 timestamp with local time zone DEFAULT cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), + ts timestamp default cast('2016-01-01 12:01:01' as timestamp), + dc decimal(8,2) default cast(4.5 as decimal(8,2)), + c2 double default cast(5 as double), c4 char(2) default cast(cast(cast('ab' as string) as varchar(2)) as char(2))); +DESC FORMATTED t11; +EXPLAIN INSERT INTO t11(c4) values('vi'); +INSERT INTO t11(c4) values('vi'); +SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11; + +EXPLAIN INSERT INTO 
t11(b1,c4) values(true,'ga'); +INSERT INTO t11(c4) values('vi'); +SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11; + +DROP TABLE t11; + +-- alter table +-- drop constraint +CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); +ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint; +DESC FORMATTED numericDataType; + +EXPLAIN INSERT INTO numericDataType(b) values(456); +INSERT INTO numericDataType(b) values(456); +SELECT * from numericDataType; + +-- add another constraint on same column +ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; +DESC FORMATTED numericDataType; +EXPLAIN INSERT INTO numericDataType(b) values(56); +INSERT INTO numericDataType(b) values(456); +SELECT * from numericDataType; + +-- alter table change column with constraint to add NOT NULL and then DEFAULT +ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE; +DESC FORMATTED numericDataType; +ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE; +DESC FORMATTED numericDataType; +EXPLAIN INSERT INTO numericDataType(f) values(847.45); --plan should have both DEFAULT and NOT NULL +INSERT INTO numericDataType(f) values(847.45); +Select * from numericDataType; +DESC FORMATTED numericDataType; + +-- drop constraint and add with same name again +ALTER TABLE numericDataType DROP CONSTRAINT default_constraint; +DESC FORMATTED numericDataType; +ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE; +DESC FORMATTED numericDataType; +EXPLAIN INSERT INTO numericDataType(f) values(847.45); +INSERT INTO numericDataType(f) values(847.45); +Select * from numericDataType; +DROP TABLE numericDataType; + +-- create default with maximum length allowed for default val (255) +create table t (i int, j string default + '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123'); +desc formatted t; +explain insert into t(i) values(3); +insert into t(i) values(3); +select * from t; +drop table t; + +-- partitioned table +set hive.exec.dynamic.partition.mode=nonstrict; +-- Table with partition +CREATE TABLE tablePartitioned (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost', + c STRING NOT NULL ENFORCED) + PARTITIONED BY (p1 STRING, p2 INT); + +-- Insert into +explain INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint'); +INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint'); +DROP TABLE tablePartitioned; + +-- try constraint with direct sql as false +set hive.metastore.try.direct.sql=false; +CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); 
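
The suffixed literals in these defaults (127Y, 32767S, 9223372036854775807L) are exactly what the ExprNodeConstantDesc.getFormatted change earlier serializes: each primitive type gets an unambiguous qualifier so the string stored in the metastore reparses to the same type, which is also why so many expected-output files in this patch gain D/L/Y/S suffixes. A rough sketch of that mapping, using the suffixes shown in the diff; the class and method names here are ours:

    final class LiteralQualifier {
      // Mirrors the suffixes added in ExprNodeConstantDesc.getFormatted:
      // tinyint -> Y, smallint -> S, bigint -> L, double -> D, decimal -> BD,
      // and date/timestamp values get a type-qualified quote form.
      static String qualify(String typeName, String value) {
        switch (typeName) {
          case "tinyint":   return value + "Y";
          case "smallint":  return value + "S";
          case "bigint":    return value + "L";
          case "double":    return value + "D";
          case "decimal":   return value + "BD";
          case "date":      return "DATE'" + value + "'";
          case "timestamp": return "TIMESTAMP'" + value + "'";
          default:          return value; // int, string, ... stay unqualified
        }
      }

      public static void main(String[] args) {
        System.out.println(qualify("tinyint", "127"));     // 127Y
        System.out.println(qualify("double", "3.4E38"));   // 3.4E38D
        System.out.println(qualify("date", "2018-02-14")); // DATE'2018-02-14'
      }
    }
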
+ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint; +DESC FORMATTED numericDataType; + +EXPLAIN INSERT INTO numericDataType(b) values(456); +INSERT INTO numericDataType(b) values(456); +SELECT * from numericDataType; + +-- add another constraint on same column +ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE; +DESC FORMATTED numericDataType; +EXPLAIN INSERT INTO numericDataType(b) values(56); +INSERT INTO numericDataType(b) values(456); +SELECT * from numericDataType; +DROP TABLE numericDataType; + +-- The following are all existing BUGS +-- BUG1: alter table change constraint doesn't work, so the following don't work +-- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint DEFAULT 1Y ENABLE; -- change default val +-- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y ENABLE; -- change constraint name +-- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y DISABLE; -- DISABLE constraint +-- BUG2: ADD column not working +-- ALTER TABLE numericDataType add columns (dd double); +--BUG3: The following adds multiple constraints +--ALTER TABLE numericDataType CHANGE c c INT DEFAULT cast(4.5 as INT); +-- BUG4: Replace column doesn't work, so the following is not working +-- alter table numericDataType replace columns (a TINYINT); +-- BUG5: select current_database() as default doesn't work + diff --git a/ql/src/test/results/clientnegative/alter_external_with_constraint.q.out b/ql/src/test/results/clientnegative/alter_external_with_default_constraint.q.out similarity index 100% rename from ql/src/test/results/clientnegative/alter_external_with_constraint.q.out rename to ql/src/test/results/clientnegative/alter_external_with_default_constraint.q.out diff --git a/ql/src/test/results/clientnegative/alter_external_with_notnull_constraint.q.out b/ql/src/test/results/clientnegative/alter_external_with_notnull_constraint.q.out new file mode 100644 index 0000000000..a6001a40bf --- /dev/null +++ b/ql/src/test/results/clientnegative/alter_external_with_notnull_constraint.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: CREATE external TABLE table1 (a STRING, b STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table1 +POSTHOOK: query: CREATE external TABLE table1 (a STRING, b STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table1 +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Constraints are disallowed with External tables. Only RELY is allowed. diff --git a/ql/src/test/results/clientnegative/alter_tableprops_external_with_default_constraint.q.out b/ql/src/test/results/clientnegative/alter_tableprops_external_with_default_constraint.q.out new file mode 100644 index 0000000000..de5e2763b4 --- /dev/null +++ b/ql/src/test/results/clientnegative/alter_tableprops_external_with_default_constraint.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: CREATE TABLE table1 (a STRING DEFAULT 'hive', b STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table1 +POSTHOOK: query: CREATE TABLE table1 (a STRING DEFAULT 'hive', b STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table1 +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Table: default.table1 has constraints enabled.Please remove those constraints to change this property. 
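
Both expected failures above come from the same gate: before allowing the conversion to an external table, the analyzer asks the metastore for enabled NOT NULL and, with this patch, DEFAULT constraints, and rejects the statement if either set is non-empty. A minimal sketch of that check, with a hypothetical Metastore interface standing in for the real getEnabledNotNullConstraints/getEnabledDefaultConstraints calls:

    import java.util.List;

    // Hypothetical stand-in for the metastore lookups used above;
    // each method returns only the table's *enabled* constraints.
    interface Metastore {
      List<String> enabledNotNull(String db, String table) throws Exception;
      List<String> enabledDefaults(String db, String table) throws Exception;
    }

    final class ExternalTableGate {
      // True if the table carries any enabled NOT NULL or DEFAULT
      // constraint; ALTER ... SET TBLPROPERTIES('external'='true')
      // is then rejected with the SemanticException shown above.
      static boolean hasConstraintsEnabled(Metastore ms, String db, String table) {
        try {
          List<String> nn = ms.enabledNotNull(db, table);
          List<String> dc = ms.enabledDefaults(db, table);
          return (nn != null && !nn.isEmpty()) || (dc != null && !dc.isEmpty());
        } catch (Exception e) {
          // the patch rethrows SemanticException as-is and wraps the
          // rest; this sketch simply wraps everything
          throw new RuntimeException(e);
        }
      }
    }
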
diff --git a/ql/src/test/results/clientnegative/alter_tableprops_external_with_constraint.q.out b/ql/src/test/results/clientnegative/alter_tableprops_external_with_notnull_constraint.q.out similarity index 100% rename from ql/src/test/results/clientnegative/alter_tableprops_external_with_constraint.q.out rename to ql/src/test/results/clientnegative/alter_tableprops_external_with_notnull_constraint.q.out diff --git a/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out b/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out new file mode 100644 index 0000000000..8a154f631e --- /dev/null +++ b/ql/src/test/results/clientnegative/constraint_duplicate_name.q.out @@ -0,0 +1,13 @@ +PREHOOK: query: create table t(i int constraint c1 not null enable) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t +POSTHOOK: query: create table t(i int constraint c1 not null enable) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t +PREHOOK: query: create table t1(j int constraint c1 default 4) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:Constraint name already exists: c1) diff --git a/ql/src/test/results/clientnegative/constraint_invalide_name.q.out b/ql/src/test/results/clientnegative/constraint_invalide_name.q.out new file mode 100644 index 0000000000..de749b4259 --- /dev/null +++ b/ql/src/test/results/clientnegative/constraint_invalide_name.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Constraint name: aaaaabcdatyaaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaabcdatyaaaa exceeded maximum allowed length: 255 diff --git a/ql/src/test/results/clientnegative/constraint_partition_columns.q.out b/ql/src/test/results/clientnegative/constraint_partition_columns.q.out new file mode 100644 index 0000000000..d629ed062e --- /dev/null +++ b/ql/src/test/results/clientnegative/constraint_partition_columns.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax NOT NULL and Default Constraints are not allowed with partition columns. diff --git a/ql/src/test/results/clientnegative/create_external_with_constraint.q.out b/ql/src/test/results/clientnegative/create_external_with_default_constraint.q.out similarity index 100% rename from ql/src/test/results/clientnegative/create_external_with_constraint.q.out rename to ql/src/test/results/clientnegative/create_external_with_default_constraint.q.out diff --git a/ql/src/test/results/clientnegative/create_external_with_notnull_constraint.q.out b/ql/src/test/results/clientnegative/create_external_with_notnull_constraint.q.out new file mode 100644 index 0000000000..e69566d503 --- /dev/null +++ b/ql/src/test/results/clientnegative/create_external_with_notnull_constraint.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Constraints are disallowed with External tables. Only RELY is allowed. 
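
The default_constraint_* negative tests that follow pin down what a DEFAULT expression may be: a constant, a permitted function, or a cast; no references to other columns; at most 255 characters once serialized; and type-compatible with the column. A rough sketch of those checks under our own names (the real validation lives in the semantic analyzer's default-constraint processing, which is not shown in this diff; the function whitelist below lists only the functions the positive test exercises and is an assumption):

    import java.util.Set;

    final class DefaultValueRules {
      static final int MAX_DEFAULT_LENGTH = 255;

      // Functions seen accepted in default_constraint.q; the real
      // whitelist in Hive may be larger -- an assumption of this sketch.
      static final Set<String> ALLOWED_FUNCTIONS =
          Set.of("current_user", "current_date", "current_timestamp");

      enum ExprKind { CONSTANT, FUNCTION, CAST, COLUMN_REF, OTHER }

      static void validate(ExprKind kind, String funcName, String serialized,
                           String exprType, String colType) {
        if (kind == ExprKind.COLUMN_REF || kind == ExprKind.OTHER
            || (kind == ExprKind.FUNCTION && !ALLOWED_FUNCTIONS.contains(funcName))) {
          // rejects e.g. DEFAULT cast(i as double) and DEFAULT repeat('s', 4)
          throw new IllegalArgumentException(
              "DEFAULT only allows constant or (whitelisted) function expressions");
        }
        if (serialized.length() > MAX_DEFAULT_LENGTH) {
          throw new IllegalArgumentException(
              "Maximum character length allowed is " + MAX_DEFAULT_LENGTH);
        }
        if (!exprType.equalsIgnoreCase(colType)) {
          // e.g. create table t (i int, j double default 1): int vs double
          throw new IllegalArgumentException("Invalid type: " + exprType
              + " for default value; column type: " + colType);
        }
      }
    }
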
diff --git a/ql/src/test/results/clientnegative/default_constraint_complex_default_value.q.out b/ql/src/test/results/clientnegative/default_constraint_complex_default_value.q.out new file mode 100644 index 0000000000..e178934b81 --- /dev/null +++ b/ql/src/test/results/clientnegative/default_constraint_complex_default_value.q.out @@ -0,0 +1 @@ +FAILED: ParseException line 2:31 cannot recognize input near 'default' '>' 'default' in list type diff --git a/ql/src/test/results/clientnegative/default_constraint_invalid_default_value.q.out b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value.q.out new file mode 100644 index 0000000000..af727dcaad --- /dev/null +++ b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Invalid Default value! diff --git a/ql/src/test/results/clientnegative/default_constraint_invalid_default_value2.q.out b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value2.q.out new file mode 100644 index 0000000000..76e5aeb2ec --- /dev/null +++ b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value2.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Invalid Default value: repeat('s', 4). DEFAULT only allows constant or function expressions diff --git a/ql/src/test/results/clientnegative/default_constraint_invalid_default_value_length.q.out b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value_length.q.out new file mode 100644 index 0000000000..f08a6ac673 --- /dev/null +++ b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value_length.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Invalid Default value: '12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234'Maximum character length allowed is 255 . diff --git a/ql/src/test/results/clientnegative/default_constraint_invalid_default_value_type.q.out b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value_type.q.out new file mode 100644 index 0000000000..c60bc02fdc --- /dev/null +++ b/ql/src/test/results/clientnegative/default_constraint_invalid_default_value_type.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Invalid Default value: UDFToString(year('1970-01-01')). DEFAULT only allows constant or function expressions diff --git a/ql/src/test/results/clientnegative/default_constraint_invalid_type.q.out b/ql/src/test/results/clientnegative/default_constraint_invalid_type.q.out new file mode 100644 index 0000000000..4e4a7b0481 --- /dev/null +++ b/ql/src/test/results/clientnegative/default_constraint_invalid_type.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10326]: Invalid Constraint syntax Invalid type: int for default value: 1. 
Please make sure that the type is compatible with column type: double diff --git a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out index 8b72cafd59..d22646bd9e 100644 --- a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out +++ b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out @@ -16,7 +16,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: concat(key, value) (type: string), array(key,value) (type: array) @@ -79,16 +79,16 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: (UDFToDouble(_col0) + 1.0) (type: double) + key expressions: (UDFToDouble(_col0) + 1.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) + 1.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) + 1.0D) (type: double) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) TableScan @@ -112,7 +112,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) + 1.0) (type: double) + 0 (UDFToDouble(_col0) + 1.0D) (type: double) 1 UDFToDouble(_col0) (type: double) outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out index ab8c22b9be..970b7863ee 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out @@ -122,7 +122,7 @@ STAGE PLANS: alias: over1k Statistics: Num rows: 2098 Data size: 16744 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (((t = 1) and (si = 2)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 2) and (si = 3)) or ((t = 27) and (si = 28)) or ((t = 3) and (si = 4)) or ((t = 37) and (si = 38)) or ((t = 4) and (si = 5)) or ((t = 47) and (si = 48)) or ((t = 5) and (si = 6)) or ((t = 52) and (si = 53)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10))) (type: boolean) + predicate: (((t = 10Y) and (si = 11S)) or ((t = 11Y) and (si = 12S)) or ((t = 12Y) and (si = 13S)) or ((t = 13Y) and (si = 14S)) or ((t = 14Y) and (si = 15S)) or ((t = 15Y) and (si = 16S)) or ((t = 16Y) and (si = 17S)) or ((t = 17Y) and (si = 18S)) or ((t = 1Y) and (si = 2S)) or ((t = 27Y) and (si = 28S)) or ((t = 2Y) and (si = 3S)) or ((t = 37Y) and (si = 38S)) or ((t = 
3Y) and (si = 4S)) or ((t = 47Y) and (si = 48S)) or ((t = 4Y) and (si = 5S)) or ((t = 52Y) and (si = 53S)) or ((t = 5Y) and (si = 6S)) or ((t = 6Y) and (si = 7S)) or ((t = 7Y) and (si = 8S)) or ((t = 9Y) and (si = 10S))) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -211,7 +211,7 @@ STAGE PLANS: alias: over1k Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((t = 1) and (si = 2)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 2) and (si = 3)) or ((t = 27) and (si = 28)) or ((t = 3) and (si = 4)) or ((t = 37) and (si = 38)) or ((t = 4) and (si = 5)) or ((t = 47) and (si = 48)) or ((t = 5) and (si = 6)) or ((t = 52) and (si = 53)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10))) (type: boolean) + predicate: (((t = 10Y) and (si = 11S)) or ((t = 11Y) and (si = 12S)) or ((t = 12Y) and (si = 13S)) or ((t = 13Y) and (si = 14S)) or ((t = 14Y) and (si = 15S)) or ((t = 15Y) and (si = 16S)) or ((t = 16Y) and (si = 17S)) or ((t = 17Y) and (si = 18S)) or ((t = 1Y) and (si = 2S)) or ((t = 27Y) and (si = 28S)) or ((t = 2Y) and (si = 3S)) or ((t = 37Y) and (si = 38S)) or ((t = 3Y) and (si = 4S)) or ((t = 47Y) and (si = 48S)) or ((t = 4Y) and (si = 5S)) or ((t = 52Y) and (si = 53S)) or ((t = 5Y) and (si = 6S)) or ((t = 6Y) and (si = 7S)) or ((t = 7Y) and (si = 8S)) or ((t = 9Y) and (si = 10S))) (type: boolean) Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out index 25efe1e1f4..cb33177890 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out @@ -304,7 +304,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 32 Data size: 3136 Basic stats: COMPLETE Column stats: COMPLETE @@ -354,7 +354,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 2352 Basic stats: COMPLETE Column stats: COMPLETE @@ -404,7 +404,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 2352 Basic stats: COMPLETE Column 
stats: COMPLETE @@ -454,7 +454,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 8 Data size: 784 Basic stats: COMPLETE Column stats: COMPLETE @@ -504,7 +504,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE @@ -554,7 +554,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 2352 Basic stats: COMPLETE Column stats: COMPLETE @@ -604,7 +604,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 32 Data size: 3136 Basic stats: COMPLETE Column stats: COMPLETE @@ -703,7 +703,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE @@ -802,7 +802,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE @@ -852,7 +852,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE @@ -902,7 +902,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE @@ -952,7 +952,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: 
Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE @@ -1002,7 +1002,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE @@ -1052,7 +1052,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE @@ -1102,7 +1102,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE @@ -1201,7 +1201,7 @@ STAGE PLANS: outputColumnNames: state, locid Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), locid (type: int), 0 (type: bigint) + keys: state (type: string), locid (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out index 78c4808f5f..b9831416a0 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out @@ -105,7 +105,7 @@ STAGE PLANS: outputColumnNames: state, country Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: state (type: string), country (type: string), 0 (type: bigint) + keys: state (type: string), country (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 80 Data size: 800 Basic stats: COMPLETE Column stats: NONE @@ -253,7 +253,7 @@ STAGE PLANS: outputColumnNames: state, country Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), country (type: string), 0 (type: bigint) + keys: state (type: string), country (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 40 Data size: 7240 Basic stats: COMPLETE Column stats: COMPLETE @@ -352,7 +352,7 @@ STAGE PLANS: outputColumnNames: state, country Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: state (type: string), country (type: string), 0 (type: bigint) + keys: state (type: string), 
country (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 80 Data size: 14480 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out index 9aaa6f68a2..cfec34649d 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out @@ -304,7 +304,7 @@ STAGE PLANS: alias: alltypes_orc Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 11 (type: bigint) + expressions: 11L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -430,7 +430,7 @@ STAGE PLANS: alias: alltypes_orc Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1 (type: tinyint), 20 (type: smallint) + expressions: 1Y (type: tinyint), 20S (type: smallint) outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -451,7 +451,7 @@ STAGE PLANS: alias: alltypes_orc Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1970-12-31 15:59:58.174 (type: timestamp) + expressions: TIMESTAMP'1970-12-31 15:59:58.174' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -472,7 +472,7 @@ STAGE PLANS: alias: alltypes_orc Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1970-12-31 (type: date) + expressions: DATE'1970-12-31' (type: date) outputColumnNames: _col0 Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/auto_join11.q.out b/ql/src/test/results/clientpositive/auto_join11.q.out index 7dd6af44eb..1beed50df2 100644 --- a/ql/src/test/results/clientpositive/auto_join11.q.out +++ b/ql/src/test/results/clientpositive/auto_join11.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -50,7 +50,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join12.q.out b/ql/src/test/results/clientpositive/auto_join12.q.out index 0e9fa989e2..7e4d39db1a 100644 --- a/ql/src/test/results/clientpositive/auto_join12.q.out +++ b/ql/src/test/results/clientpositive/auto_join12.q.out @@ -41,7 +41,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 80.0) (type: boolean) + predicate: (UDFToDouble(key) < 80.0D) 
(type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -57,7 +57,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 80.0) (type: boolean) + predicate: (UDFToDouble(key) < 80.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -76,7 +76,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 80.0) (type: boolean) + predicate: (UDFToDouble(key) < 80.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join13.q.out b/ql/src/test/results/clientpositive/auto_join13.q.out index 45a0a5ed1b..bae1879fe2 100644 --- a/ql/src/test/results/clientpositive/auto_join13.q.out +++ b/ql/src/test/results/clientpositive/auto_join13.q.out @@ -41,7 +41,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 200.0) (type: boolean) + predicate: (UDFToDouble(key) < 200.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -56,7 +56,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -74,7 +74,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join14.q.out b/ql/src/test/results/clientpositive/auto_join14.q.out index 52aaa023c5..6201071cb0 100644 --- a/ql/src/test/results/clientpositive/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/auto_join14.q.out @@ -34,7 +34,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -52,7 +52,7 @@ STAGE PLANS: alias: srcpart Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join16.q.out 
b/ql/src/test/results/clientpositive/auto_join16.q.out index f05de4197d..3cd4376392 100644 --- a/ql/src/test/results/clientpositive/auto_join16.q.out +++ b/ql/src/test/results/clientpositive/auto_join16.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean) + predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -50,7 +50,7 @@ STAGE PLANS: alias: tab Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean) + predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join27.q.out b/ql/src/test/results/clientpositive/auto_join27.q.out index ba67d1a8c6..1c5f573261 100644 --- a/ql/src/test/results/clientpositive/auto_join27.q.out +++ b/ql/src/test/results/clientpositive/auto_join27.q.out @@ -37,7 +37,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 200.0) (type: boolean) + predicate: (UDFToDouble(key) < 200.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: key (type: string), value (type: string) @@ -78,7 +78,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 200.0) (type: boolean) + predicate: (UDFToDouble(key) < 200.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -96,7 +96,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 200.0) (type: boolean) + predicate: (UDFToDouble(key) < 200.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join33.q.out b/ql/src/test/results/clientpositive/auto_join33.q.out index a347fcdf9b..c0eaecd622 100644 --- a/ql/src/test/results/clientpositive/auto_join33.q.out +++ b/ql/src/test/results/clientpositive/auto_join33.q.out @@ -30,7 +30,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToDouble(key) + 1.0) < 10.0) and key is not null) (type: boolean) + predicate: (((UDFToDouble(key) + 1.0D) < 10.0D) and key is not null) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -38,8 +38,8 @@ STAGE PLANS: Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator keys: - 0 (UDFToDouble(_col0) + 1.0) (type: double) - 1 
(UDFToDouble(_col0) + 2.0) (type: double) + 0 (UDFToDouble(_col0) + 1.0D) (type: double) + 1 (UDFToDouble(_col0) + 2.0D) (type: double) Stage: Stage-3 Map Reduce @@ -48,7 +48,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToDouble(key) + 2.0) < 10.0) and key is not null) (type: boolean) + predicate: (((UDFToDouble(key) + 2.0D) < 10.0D) and key is not null) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -58,8 +58,8 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) + 1.0) (type: double) - 1 (UDFToDouble(_col0) + 2.0) (type: double) + 0 (UDFToDouble(_col0) + 1.0D) (type: double) + 1 (UDFToDouble(_col0) + 2.0D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join4.q.out b/ql/src/test/results/clientpositive/auto_join4.q.out index 4b08f937e3..b5efad66f2 100644 --- a/ql/src/test/results/clientpositive/auto_join4.q.out +++ b/ql/src/test/results/clientpositive/auto_join4.q.out @@ -56,7 +56,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -74,7 +74,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join5.q.out b/ql/src/test/results/clientpositive/auto_join5.q.out index 03262fecde..f91cf7ac0f 100644 --- a/ql/src/test/results/clientpositive/auto_join5.q.out +++ b/ql/src/test/results/clientpositive/auto_join5.q.out @@ -56,7 +56,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -74,7 +74,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join6.q.out 
b/ql/src/test/results/clientpositive/auto_join6.q.out index d8c58d44f9..166ecdadf5 100644 --- a/ql/src/test/results/clientpositive/auto_join6.q.out +++ b/ql/src/test/results/clientpositive/auto_join6.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -66,7 +66,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join7.q.out b/ql/src/test/results/clientpositive/auto_join7.q.out index b426b8ceee..c8f71448c7 100644 --- a/ql/src/test/results/clientpositive/auto_join7.q.out +++ b/ql/src/test/results/clientpositive/auto_join7.q.out @@ -60,7 +60,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -76,7 +76,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -92,7 +92,7 @@ STAGE PLANS: alias: src3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out index f4544ad3b9..063f2f3a88 100644 --- a/ql/src/test/results/clientpositive/auto_join8.q.out +++ b/ql/src/test/results/clientpositive/auto_join8.q.out @@ -56,7 +56,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: 
string) @@ -74,7 +74,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out index a8ae0007dd..ad5e1875ad 100644 --- a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out +++ b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out @@ -686,7 +686,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -704,7 +704,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean) + predicate: ((UDFToDouble(key) > 100.0D) and value is not null) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -889,7 +889,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean) + predicate: ((UDFToDouble(key) > 100.0D) and value is not null) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -907,7 +907,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -937,7 +937,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean) + predicate: ((UDFToDouble(key) > 100.0D) and value is not null) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -953,7 +953,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out index 9c23a76e47..e00e9edd85 100644 --- a/ql/src/test/results/clientpositive/cast1.q.out +++ 
b/ql/src/test/results/clientpositive/cast1.q.out @@ -30,10 +30,10 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 5 (type: int), 5.0 (type: double), 5.0 (type: double), 5.0 (type: double), 5 (type: int), 'TRUE' (type: string), 1 (type: int) + expressions: 5 (type: int), 5.0D (type: double), 5.0D (type: double), 5.0D (type: double), 5 (type: int), 'TRUE' (type: string), 1 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/cast_on_constant.q.out b/ql/src/test/results/clientpositive/cast_on_constant.q.out index 7f920f41af..b56da1d19f 100644 --- a/ql/src/test/results/clientpositive/cast_on_constant.q.out +++ b/ql/src/test/results/clientpositive/cast_on_constant.q.out @@ -22,10 +22,10 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (ts_field = 2016-01-23 00:00:00.0) (type: boolean) + predicate: (ts_field = TIMESTAMP'2016-01-23 00:00:00.0') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 2016-01-23 00:00:00.0 (type: timestamp), date_field (type: date) + expressions: TIMESTAMP'2016-01-23 00:00:00.0' (type: timestamp), date_field (type: date) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -58,10 +58,10 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (date_field = 2016-01-23) (type: boolean) + predicate: (date_field = DATE'2016-01-23') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: ts_field (type: timestamp), 2016-01-23 (type: date) + expressions: ts_field (type: timestamp), DATE'2016-01-23' (type: date) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -94,10 +94,10 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (ts_field = 2016-01-23 00:00:00.0) (type: boolean) + predicate: (ts_field = TIMESTAMP'2016-01-23 00:00:00.0') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: 2016-01-23 00:00:00.0 (type: timestamp), date_field (type: date) + expressions: TIMESTAMP'2016-01-23 00:00:00.0' (type: timestamp), date_field (type: date) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -130,10 +130,10 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (date_field = 2016-01-23) (type: boolean) + predicate: (date_field = DATE'2016-01-23') (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: ts_field (type: timestamp), 2016-01-23 (type: date) + expressions: ts_field (type: 
timestamp), DATE'2016-01-23' (type: date) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/cbo_const.q.out b/ql/src/test/results/clientpositive/cbo_const.q.out index cfc7f522d4..f13d17e8a7 100644 --- a/ql/src/test/results/clientpositive/cbo_const.q.out +++ b/ql/src/test/results/clientpositive/cbo_const.q.out @@ -158,7 +158,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 4.0) (type: boolean) + predicate: (UDFToDouble(key) = 4.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -235,7 +235,7 @@ STAGE PLANS: alias: y Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 3.0) (type: boolean) + predicate: (UDFToDouble(key) = 3.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -251,7 +251,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) = 3.0) and value is not null) (type: boolean) + predicate: ((UDFToDouble(key) = 3.0D) and value is not null) (type: boolean) Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -293,7 +293,7 @@ STAGE PLANS: alias: z Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hr) = 14.0) and (ds = '2008-04-08') and value is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 14.0D) and (ds = '2008-04-08') and value is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: value (type: string) diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out index 0bc7d8764e..ec06e0e6c7 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out @@ -30,7 +30,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -51,7 +51,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -328,7 +328,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) 
Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -349,7 +349,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out b/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out index c553ce5ac6..d91e3db20f 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out @@ -512,7 +512,7 @@ STAGE PLANS: alias: bucket Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CASE WHEN ((key < 100.0)) THEN (NaN) ELSE (key) END (type: double) + expressions: CASE WHEN ((key < 100.0D)) THEN (NaND) ELSE (key) END (type: double) outputColumnNames: $f0 Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/columnstats_partlvl.q.out b/ql/src/test/results/clientpositive/columnstats_partlvl.q.out index 5c9d375a4e..7d1b433919 100644 --- a/ql/src/test/results/clientpositive/columnstats_partlvl.q.out +++ b/ql/src/test/results/clientpositive/columnstats_partlvl.q.out @@ -53,25 +53,25 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeeid, 'hll') - keys: 2000.0 (type: double) + keys: 2000.0D (type: double) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 2000.0 (type: double) + key expressions: 2000.0D (type: double) sort order: + - Map-reduce partition columns: 2000.0 (type: double) + Map-reduce partition columns: 2000.0D (type: double) Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: struct) Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0) - keys: 2000.0 (type: double) + keys: 2000.0D (type: double) mode: mergepartial outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: struct), 2000.0 (type: double) + expressions: _col1 (type: struct), 2000.0D (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -115,15 +115,15 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeeid, 'hll') - keys: 2000.0 (type: double) + keys: 2000.0D (type: double) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 2000.0 (type: double) + key expressions: 2000.0D (type: double) null sort order: a sort order: + - Map-reduce partition columns: 2000.0 (type: double) + Map-reduce partition columns: 2000.0D (type: double) Statistics: Num 
rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col1 (type: struct) @@ -185,12 +185,12 @@ STAGE PLANS: Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0) - keys: 2000.0 (type: double) + keys: 2000.0D (type: double) mode: mergepartial outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: struct), 2000.0 (type: double) + expressions: _col1 (type: struct), 2000.0D (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -301,25 +301,25 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeeid, 'hll') - keys: 4000.0 (type: double) + keys: 4000.0D (type: double) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 4000.0 (type: double) + key expressions: 4000.0D (type: double) sort order: + - Map-reduce partition columns: 4000.0 (type: double) + Map-reduce partition columns: 4000.0D (type: double) Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: struct) Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0) - keys: 4000.0 (type: double) + keys: 4000.0D (type: double) mode: mergepartial outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: struct), 4000.0 (type: double) + expressions: _col1 (type: struct), 4000.0D (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -363,15 +363,15 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeeid, 'hll') - keys: 4000.0 (type: double) + keys: 4000.0D (type: double) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 4000.0 (type: double) + key expressions: 4000.0D (type: double) null sort order: a sort order: + - Map-reduce partition columns: 4000.0 (type: double) + Map-reduce partition columns: 4000.0D (type: double) Statistics: Num rows: 3 Data size: 1050 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col1 (type: struct) @@ -433,12 +433,12 @@ STAGE PLANS: Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0) - keys: 4000.0 (type: double) + keys: 4000.0D (type: double) mode: mergepartial outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: struct), 4000.0 (type: double) + expressions: _col1 (type: struct), 4000.0D (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 350 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -511,25 +511,25 @@ STAGE PLANS: Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeeid, 'hll'), compute_stats(employeename, 'hll') - keys: 2000.0 (type: double) + keys: 2000.0D (type: double) mode: hash 
outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 2000.0 (type: double) + key expressions: 2000.0D (type: double) sort order: + - Map-reduce partition columns: 2000.0 (type: double) + Map-reduce partition columns: 2000.0D (type: double) Statistics: Num rows: 13 Data size: 92 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: struct), _col2 (type: struct) Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - keys: 2000.0 (type: double) + keys: 2000.0D (type: double) mode: mergepartial outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: struct), _col2 (type: struct), 2000.0 (type: double) + expressions: _col1 (type: struct), _col2 (type: struct), 2000.0D (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out index d055be2c95..66b44bfbab 100644 --- a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out +++ b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out @@ -89,25 +89,25 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeename, 'hll'), compute_stats(employeeid, 'hll') - keys: 4000.0 (type: double), country (type: string) + keys: 4000.0D (type: double), country (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 4000.0 (type: double), _col1 (type: string) + key expressions: 4000.0D (type: double), _col1 (type: string) sort order: ++ - Map-reduce partition columns: 4000.0 (type: double), _col1 (type: string) + Map-reduce partition columns: 4000.0D (type: double), _col1 (type: string) Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: struct), _col3 (type: struct) Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1) - keys: 4000.0 (type: double), KEY._col1 (type: string) + keys: 4000.0D (type: double), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: struct), _col3 (type: struct), 4000.0 (type: double), _col1 (type: string) + expressions: _col2 (type: struct), _col3 (type: struct), 4000.0D (type: double), _col1 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 640 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -220,25 +220,25 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 1690 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: compute_stats(employeeid, 'hll') - keys: 2000.0 (type: double), country (type: string) + keys: 2000.0D (type: double), country (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 1690 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: 
2000.0 (type: double), _col1 (type: string) + key expressions: 2000.0D (type: double), _col1 (type: string) sort order: ++ - Map-reduce partition columns: 2000.0 (type: double), _col1 (type: string) + Map-reduce partition columns: 2000.0D (type: double), _col1 (type: string) Statistics: Num rows: 3 Data size: 1690 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: struct) Reduce Operator Tree: Group By Operator aggregations: compute_stats(VALUE._col0) - keys: 2000.0 (type: double), KEY._col1 (type: string) + keys: 2000.0D (type: double), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 563 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: struct), 2000.0 (type: double), _col1 (type: string) + expressions: _col2 (type: struct), 2000.0D (type: double), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 563 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/comments.q.out b/ql/src/test/results/clientpositive/comments.q.out index 53a766ff21..6f2fa28cb9 100644 --- a/ql/src/test/results/clientpositive/comments.q.out +++ b/ql/src/test/results/clientpositive/comments.q.out @@ -95,7 +95,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 0.0) (type: boolean) + predicate: (UDFToDouble(key) > 0.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -116,7 +116,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 0.0) (type: boolean) + predicate: (UDFToDouble(key) > 0.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/constant_prop_3.q.out b/ql/src/test/results/clientpositive/constant_prop_3.q.out index b754f8e7b4..516fed7160 100644 --- a/ql/src/test/results/clientpositive/constant_prop_3.q.out +++ b/ql/src/test/results/clientpositive/constant_prop_3.q.out @@ -275,7 +275,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col5, _col6, _col7, _col9 Statistics: Num rows: 1 Data size: 18 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col6 = 0) or (_col9 is null and _col1 is not null and (_col7 >= _col6))) (type: boolean) + predicate: ((_col6 = 0L) or (_col9 is null and _col1 is not null and (_col7 >= _col6))) (type: boolean) Statistics: Num rows: 1 Data size: 18 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col1 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int) diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out index c4fce25e41..41297ebc71 100644 --- a/ql/src/test/results/clientpositive/constantfolding.q.out +++ b/ql/src/test/results/clientpositive/constantfolding.q.out @@ -192,7 +192,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1970-12-31 15:59:58.174 (type: timestamp) + expressions: TIMESTAMP'1970-12-31 15:59:58.174' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -213,7 +213,7 
+213,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1970-12-31 (type: date) + expressions: DATE'1970-12-31' (type: date) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -267,7 +267,7 @@ STAGE PLANS: alias: dest1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1.098612288668 (type: double), null (type: double), null (type: double), 1.098612288668 (type: double), null (type: double), null (type: double), 1.584962500721 (type: double), null (type: double), null (type: double), 0.47712125472 (type: double), null (type: double), null (type: double), 1.584962500721 (type: double), null (type: double), null (type: double), null (type: double), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), 1.0 (type: double), 8.0 (type: double), 8.0 (type: double) + expressions: 1.098612288668D (type: double), null (type: double), null (type: double), 1.098612288668D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), 0.47712125472D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), null (type: double), -1.0D (type: double), 7.389056098931D (type: double), 8.0D (type: double), 8.0D (type: double), 0.125D (type: double), 8.0D (type: double), 2.0D (type: double), NaND (type: double), 1.0D (type: double), 1.0D (type: double), 8.0D (type: double), 8.0D (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/constprog2.q.out b/ql/src/test/results/clientpositive/constprog2.q.out index cbc5fd202f..55603ce7ae 100644 --- a/ql/src/test/results/clientpositive/constprog2.q.out +++ b/ql/src/test/results/clientpositive/constprog2.q.out @@ -88,7 +88,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/constprog_type.q.out b/ql/src/test/results/clientpositive/constprog_type.q.out index 27ef1f482b..6c300855a1 100644 --- a/ql/src/test/results/clientpositive/constprog_type.q.out +++ b/ql/src/test/results/clientpositive/constprog_type.q.out @@ -35,7 +35,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2013-11-17 (type: date), 2011-04-29 20:46:56.4485 (type: timestamp) + expressions: DATE'2013-11-17' (type: date), TIMESTAMP'2011-04-29 20:46:56.4485' (type: timestamp) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 48000 Basic stats:
COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/correlationoptimizer10.q.out b/ql/src/test/results/clientpositive/correlationoptimizer10.q.out index b5d2fe7ad5..bec6aab85f 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer10.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer10.q.out @@ -386,7 +386,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -401,7 +401,7 @@ STAGE PLANS: alias: y Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -440,7 +440,7 @@ STAGE PLANS: alias: xx Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -547,7 +547,7 @@ STAGE PLANS: alias: xx Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -563,7 +563,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -578,7 +578,7 @@ STAGE PLANS: alias: y Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -706,7 +706,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 180.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 180.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ 
-721,7 +721,7 @@ STAGE PLANS:
   alias: y
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 180.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 180.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string)
@@ -760,7 +760,7 @@ STAGE PLANS:
   alias: xx
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 180.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 180.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -879,7 +879,7 @@ STAGE PLANS:
   alias: xx
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 180.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 180.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -895,7 +895,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 180.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 180.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string)
@@ -910,7 +910,7 @@ STAGE PLANS:
   alias: y
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: ((UDFToDouble(key) < 200.0) and (UDFToDouble(key) > 180.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 200.0D) and (UDFToDouble(key) > 180.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer8.q.out b/ql/src/test/results/clientpositive/correlationoptimizer8.q.out
index 9dd45f9def..34a92164e4 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer8.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer8.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -83,7 +83,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean)
+    predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean)
     Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -123,7 +123,7 @@ STAGE PLANS:
   alias: x1
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -222,7 +222,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -240,7 +240,7 @@ STAGE PLANS:
   alias: x1
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -258,7 +258,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean)
+    predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean)
     Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -426,7 +426,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -512,7 +512,7 @@ STAGE PLANS:
   alias: x1
   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
     Statistics: Num rows: 8 Data size: 61 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: value (type: string)
@@ -635,7 +635,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -653,7 +653,7 @@ STAGE PLANS:
   alias: x1
   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
     Statistics: Num rows: 8 Data size: 61 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: value (type: string)
@@ -848,7 +848,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -901,7 +901,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean)
+    predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean)
     Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -941,7 +941,7 @@ STAGE PLANS:
   alias: x1
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -1012,7 +1012,7 @@ STAGE PLANS:
   alias: x
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
@@ -1102,7 +1102,7 @@ STAGE PLANS:
   alias: x1
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count()
diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
index 7461523541..9be2f5518c 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -193,7 +193,7 @@ STAGE PLANS:
   insideView TRUE
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) = 18.0) (type: boolean)
+    predicate: (UDFToDouble(key) = 18.0D) (type: boolean)
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/cross_join_merge.q.out b/ql/src/test/results/clientpositive/cross_join_merge.q.out
index 7241dfe45f..c57b2f89ee 100644
--- a/ql/src/test/results/clientpositive/cross_join_merge.q.out
+++ b/ql/src/test/results/clientpositive/cross_join_merge.q.out
@@ -267,7 +267,7 @@ STAGE PLANS:
   alias: src2
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (5.0 = UDFToDouble(key)) (type: boolean)
+    predicate: (5.0D = UDFToDouble(key)) (type: boolean)
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
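Note on the pattern in the hunks above: every change is the explain printer rendering double constants with a D suffix. A minimal sketch of the kind of query behind these plans, assuming the standard src(key STRING, value STRING) fixture used throughout these tests:

-- key is a STRING, so a numeric comparison is evaluated as DOUBLE;
-- the plan wraps the column in UDFToDouble, and the literal now prints
-- as 20.0D instead of the type-ambiguous 20.0.
EXPLAIN
SELECT key, value
FROM src
WHERE key < 20;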
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 144811d866..d2565fce76 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -18,7 +18,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double), concat(value, value) (type: string)
+    expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double), concat(value, value) (type: string)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
     Limit
@@ -517,7 +517,7 @@ STAGE PLANS:
   alias: src1
   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+    expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
     outputColumnNames: _col0, _col1, _col2
     Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -1123,7 +1123,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 9.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 9.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: count(value)
@@ -1253,7 +1253,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 9.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 9.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: max(value)
diff --git a/ql/src/test/results/clientpositive/cte_5.q.out b/ql/src/test/results/clientpositive/cte_5.q.out
index 1c7812e128..e99f1e73c2 100644
--- a/ql/src/test/results/clientpositive/cte_5.q.out
+++ b/ql/src/test/results/clientpositive/cte_5.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
   alias: a
   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(colnum) = 5.0) (type: boolean)
+    predicate: (UDFToDouble(colnum) = 5.0D) (type: boolean)
     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out
index 3f84a00b1c..e451a186fc 100644
--- a/ql/src/test/results/clientpositive/decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/decimal_udf.q.out
@@ -174,7 +174,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0)) (type: double)
+    expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0D)) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -241,7 +241,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) + 1.0) (type: double)
+    expressions: (UDFToDouble(key) + 1.0D) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -442,7 +442,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0)) (type: double)
+    expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0D)) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -509,7 +509,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) - 1.0) (type: double)
+    expressions: (UDFToDouble(key) - 1.0D) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -765,7 +765,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0)) (type: double)
+    expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0D)) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -832,7 +832,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) * 2.0) (type: double)
+    expressions: (UDFToDouble(key) * 2.0D) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -1090,7 +1090,7 @@ STAGE PLANS:
   predicate: (value <> 0) (type: boolean)
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0)) (type: double)
+    expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0D)) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
@@ -1143,7 +1143,7 @@ STAGE PLANS:
   alias: decimal_udf
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: (1.0 + (UDFToDouble(key) / 2.0)) (type: double)
+    expressions: (1.0D + (UDFToDouble(key) / 2.0D)) (type: double)
     outputColumnNames: _col0
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     ListSink
diff --git a/ql/src/test/results/clientpositive/decimal_udf2.q.out b/ql/src/test/results/clientpositive/decimal_udf2.q.out
index b6c2db576c..e873636c37 100644
--- a/ql/src/test/results/clientpositive/decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/decimal_udf2.q.out
@@ -47,7 +47,7 @@ STAGE PLANS:
   predicate: (key = 10) (type: boolean)
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: null (type: double), null (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
+    expressions: null (type: double), null (type: double), 1.4711276743037347D (type: double), -0.8390715290764524D (type: double), -0.5440211108893698D (type: double), 0.6483608274590866D (type: double), 0.17453292519943295D (type: double)
     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -104,7 +104,7 @@ STAGE PLANS:
   predicate: (key = 10) (type: boolean)
   Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+    expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
     Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out b/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
index f82ecffec7..19d90082bf 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
@@ -127,7 +127,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
   Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
   Select Operator
-    expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp), (floor((1.0 / rand())) % 6) (type: bigint)
+    expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp), (floor((1.0D / rand())) % 6) (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, __time_granularity, __druid_extra_partition_key
     Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
     Reduce Output Operator
@@ -355,7 +355,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
   Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
   Select Operator
-    expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp), (floor((1.0 / rand())) % 6) (type: bigint)
+    expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp), (floor((1.0D / rand())) % 6) (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, __time_granularity, __druid_extra_partition_key
     Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
     Reduce Output Operator
@@ -492,7 +492,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
   Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
   Select Operator
-    expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp), (floor((1.0 / rand())) % 6) (type: bigint)
+    expressions: _col0 (type: timestamp with local time zone), _col1 (type: string), _col2 (type: string), _col3 (type: double), _col4 (type: float), _col5 (type: tinyint), _col6 (type: smallint), _col7 (type: int), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: boolean), floor_hour(CAST( GenericUDFEpochMilli(_col0) AS TIMESTAMP)) (type: timestamp), (floor((1.0D / rand())) % 6) (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, __time_granularity, __druid_extra_partition_key
     Statistics: Num rows: 12288 Data size: 2601650 Basic stats: COMPLETE Column stats: COMPLETE
     Reduce Output Operator
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index 08b64d6d82..eb2b83fec4 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -723,13 +723,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
       alias: druid_table_1
-      filterExpr: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
+      filterExpr: floor_day(__time) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
       properties:
         druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
         druid.query.type select
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
+        predicate: floor_day(__time) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Select Operator
           expressions: robot (type: string), floor_day(__time) (type: timestamp with local time zone)
@@ -817,13 +817,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
      alias: druid_table_1
-      filterExpr: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
+      filterExpr: floor_day(extract) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
       properties:
         druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
         druid.query.type groupBy
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 US/Pacific AND 1999-11-10 00:00:00.0 US/Pacific (type: boolean)
+        predicate: floor_day(extract) BETWEEN TIMESTAMPLOCALTZ'1999-11-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'1999-11-10 00:00:00.0 US/Pacific' (type: boolean)
         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Select Operator
           expressions: robot (type: string), extract (type: timestamp with local time zone)
diff --git a/ql/src/test/results/clientpositive/druid_basic3.q.out b/ql/src/test/results/clientpositive/druid_basic3.q.out
index ce6d0aafed..9c4cbb55f9 100644
--- a/ql/src/test/results/clientpositive/druid_basic3.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic3.q.out
@@ -197,7 +197,7 @@ STAGE PLANS:
   druid.query.type groupBy
   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
   Select Operator
-    expressions: language (type: string), ($f1 + 100.0) (type: double)
+    expressions: language (type: string), ($f1 + 100.0D) (type: double)
     outputColumnNames: _col0, _col1
     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
     Reduce Output Operator
@@ -255,7 +255,7 @@ STAGE PLANS:
   druid.query.type groupBy
   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
   Select Operator
-    expressions: language (type: string), (-1.0 * ((($f1 - $f2) / UDFToDouble(($f3 * 3))) + $f4)) (type: double)
+    expressions: language (type: string), (-1.0D * ((($f1 - $f2) / UDFToDouble(($f3 * 3L))) + $f4)) (type: double)
     outputColumnNames: _col0, _col1
     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
     Reduce Output Operator
@@ -457,7 +457,7 @@ STAGE PLANS:
   druid.query.type groupBy
   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
   Select Operator
-    expressions: language (type: string), ($f1 + 100.0) (type: double), (($f1 + 100.0) - $f2) (type: double)
+    expressions: language (type: string), ($f1 + 100.0D) (type: double), (($f1 + 100.0D) - $f2) (type: double)
     outputColumnNames: _col0, _col1, _col2
     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
     Reduce Output Operator
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
index c94cbe9bd4..0a0b1d31dc 100644
--- a/ql/src/test/results/clientpositive/druid_intervals.q.out
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -353,13 +353,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
       alias: druid_table_1
-      filterExpr: ((__time) IN (2010-01-01 00:00:00.0 US/Pacific, 2011-01-01 00:00:00.0 US/Pacific) or (robot = 'user1')) (type: boolean)
+      filterExpr: ((__time) IN (TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific', TIMESTAMPLOCALTZ'2011-01-01 00:00:00.0 US/Pacific') or (robot = 'user1')) (type: boolean)
       properties:
         druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
         druid.query.type select
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: ((__time) IN (2010-01-01 00:00:00.0 US/Pacific, 2011-01-01 00:00:00.0 US/Pacific) or (robot = 'user1')) (type: boolean)
+        predicate: ((__time) IN (TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific', TIMESTAMPLOCALTZ'2011-01-01 00:00:00.0 US/Pacific') or (robot = 'user1')) (type: boolean)
         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Select Operator
           expressions: __time (type: timestamp with local time zone), robot (type: string)
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index 785cbd2114..19a5af3a56 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -24,13 +24,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
       alias: druid_table_1
-      filterExpr: (((__time >= 2009-12-31 16:00:00.0 US/Pacific) and (__time <= 2012-02-29 16:00:00.0 US/Pacific)) or (added <= 0)) (type: boolean)
+      filterExpr: (((__time >= TIMESTAMPLOCALTZ'2009-12-31 16:00:00.0 US/Pacific') and (__time <= TIMESTAMPLOCALTZ'2012-02-29 16:00:00.0 US/Pacific')) or (added <= 0)) (type: boolean)
       properties:
         druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
         druid.query.type select
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: (((__time >= 2009-12-31 16:00:00.0 US/Pacific) and (__time <= 2012-02-29 16:00:00.0 US/Pacific)) or (added <= 0)) (type: boolean)
+        predicate: (((__time >= TIMESTAMPLOCALTZ'2009-12-31 16:00:00.0 US/Pacific') and (__time <= TIMESTAMPLOCALTZ'2012-02-29 16:00:00.0 US/Pacific')) or (added <= 0)) (type: boolean)
         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Select Operator
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -443,13 +443,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
       alias: druid_table_1
-      filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
+      filterExpr: floor_hour(__time) BETWEEN TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'2014-01-01 00:00:00.0 US/Pacific' (type: boolean)
       properties:
         druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["added","variation"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
         druid.query.type select
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
+        predicate: floor_hour(__time) BETWEEN TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'2014-01-01 00:00:00.0 US/Pacific' (type: boolean)
         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Select Operator
           expressions: floor_hour(__time) (type: timestamp with local time zone), added (type: float), variation (type: float)
@@ -520,13 +520,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
       alias: druid_table_1
-      filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
+      filterExpr: floor_hour(__time) BETWEEN TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'2014-01-01 00:00:00.0 US/Pacific' (type: boolean)
       properties:
         druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["added","variation"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
         druid.query.type select
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
+        predicate: floor_hour(__time) BETWEEN TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'2014-01-01 00:00:00.0 US/Pacific' (type: boolean)
         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Select Operator
           expressions: floor_hour(__time) (type: timestamp with local time zone), added (type: float), variation (type: float)
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 90eec3dd49..7a4c1f9f56 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -360,13 +360,13 @@ STAGE PLANS:
   Map Operator Tree:
     TableScan
       alias: druid_table_1
-      filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
+      filterExpr: floor_hour(__time) BETWEEN TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'2014-01-01 00:00:00.0 US/Pacific' (type: boolean)
       properties:
         druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":["added","variation"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
         druid.query.type select
       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Filter Operator
-        predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 US/Pacific AND 2014-01-01 00:00:00.0 US/Pacific (type: boolean)
+        predicate: floor_hour(__time) BETWEEN TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' AND TIMESTAMPLOCALTZ'2014-01-01 00:00:00.0 US/Pacific' (type: boolean)
        Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
        Select Operator
          expressions: robot (type: string), floor_hour(__time) (type: timestamp with local time zone), added (type: float), variation (type: float)
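The Druid hunks above carry the second half of the change: timestamp-with-local-time-zone constants now print as typed TIMESTAMPLOCALTZ'...' literals instead of bare values. A sketch of the filter shape involved, assuming the druid_table_1 fixture from these tests; the exact cast syntax in the real .q files may differ:

-- the constants land in both filterExpr and the Filter Operator predicate,
-- now rendered as TIMESTAMPLOCALTZ'2010-01-01 00:00:00.0 US/Pacific' etc.
EXPLAIN
SELECT robot
FROM druid_table_1
WHERE floor_hour(`__time`)
  BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
      AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE);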
diff --git a/ql/src/test/results/clientpositive/except_all.q.out b/ql/src/test/results/clientpositive/except_all.q.out
index c3310f5154..ae8e92f612 100644
--- a/ql/src/test/results/clientpositive/except_all.q.out
+++ b/ql/src/test/results/clientpositive/except_all.q.out
@@ -251,7 +251,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -358,7 +358,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -428,7 +428,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -528,7 +528,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -587,7 +587,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3
   Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean)
+    predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean)
     Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: _col0 (type: string), _col1 (type: string)
@@ -600,7 +600,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 15 Data size: 159 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 15 Data size: 159 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -659,7 +659,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3
   Statistics: Num rows: 132 Data size: 1402 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean)
+    predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean)
     Statistics: Num rows: 22 Data size: 233 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: _col0 (type: string), _col1 (type: string)
@@ -703,7 +703,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -743,7 +743,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -783,7 +783,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+    expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -851,7 +851,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: int), 2 (type: bigint), _col1 (type: bigint)
+    expressions: _col0 (type: int), 2L (type: bigint), _col1 (type: bigint)
     outputColumnNames: _col0, _col1, _col2
     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
@@ -910,7 +910,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (((_col1 * 2) = _col2) and (_col1 > 0)) (type: boolean)
+    predicate: (((_col1 * 2) = _col2) and (_col1 > 0L)) (type: boolean)
     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: _col0 (type: int)
@@ -957,7 +957,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-    expressions: _col0 (type: int), 1 (type: bigint), _col1 (type: bigint)
+    expressions: _col0 (type: int), 1L (type: bigint), _col1 (type: bigint)
     outputColumnNames: _col0, _col1, _col2
     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
     File Output Operator
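The except_all hunks show the integral side of the change: bigint constants print with an L suffix. As the plans themselves show, Hive implements EXCEPT ALL by tagging each branch with a bigint multiplier and filtering on the combined counts, which is where the 2L and 1L constants and the (_col2 > 0L) guard come from. A minimal sketch, with a two-table setup assumed:

-- the rewrite attaches bigint constants (previously printed as bare 2 and 1,
-- now 2L and 1L) to the branches before comparing the per-group counts
EXPLAIN
SELECT key, value FROM a
EXCEPT ALL
SELECT key, value FROM b;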
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
index 8fea7fdf44..25c620bbc5 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_date.q.out
@@ -71,7 +71,7 @@ STAGE PLANS:
   alias: date_dim
   Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
   Filter Operator
-    predicate: (d_date > 1900-01-02) (type: boolean)
+    predicate: (d_date > DATE'1900-01-02') (type: boolean)
     Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
     Select Operator
       Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
diff --git a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
index 8b95287b41..6ab11b152b 100644
--- a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
@@ -275,7 +275,7 @@ STAGE PLANS:
   alias: t2
   Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) = 1.0) (type: boolean)
+    predicate: (UDFToDouble(key) = 1.0D) (type: boolean)
     Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), c_int (type: int), c_float (type: float)
@@ -291,7 +291,7 @@ STAGE PLANS:
   alias: t3
   Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: ((UDFToDouble(key) = 1.0) and (c_int = 1)) (type: boolean)
+    predicate: ((UDFToDouble(key) = 1.0D) and (c_int = 1)) (type: boolean)
     Statistics: Num rows: 5 Data size: 65 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), c_float (type: float)
@@ -333,7 +333,7 @@ STAGE PLANS:
   alias: t1
   Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) = 1.0) (type: boolean)
+    predicate: (UDFToDouble(key) = 1.0D) (type: boolean)
     Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out b/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
index 49e0d35d39..fc9a8ae140 100644
--- a/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
+++ b/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
@@ -42,10 +42,10 @@ STAGE PLANS:
   alias: lineitem
   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (('RAIL' = l_shipmode) and (1996-03-30 = to_date(l_shipdate))) (type: boolean)
+    predicate: (('RAIL' = l_shipmode) and (DATE'1996-03-30' = to_date(l_shipdate))) (type: boolean)
     Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
     Select Operator
-      expressions: l_orderkey (type: int), (UDFToDouble(l_partkey) / 1000000.0) (type: double)
+      expressions: l_orderkey (type: int), (UDFToDouble(l_partkey) / 1000000.0D) (type: double)
       outputColumnNames: _col0, _col1
       Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
       Group By Operator
diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
index 55d2a4d04b..85321780f6 100644
--- a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -51,7 +51,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
     Select Operator
      expressions: key (type: string), value (type: string)
@@ -324,7 +324,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -345,7 +345,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -622,7 +622,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -643,7 +643,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -916,7 +916,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
@@ -937,7 +937,7 @@ STAGE PLANS:
   GatherStats: false
   Filter Operator
     isSamplingPred: false
-    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+    predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string), value (type: string)
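fold_eq_with_case_when above also picks up typed DATE literals: the comparison constant folded out of to_date() now prints as DATE'1996-03-30'. A sketch of the query shape, with column and table names taken from the hunk and the rest assumed:

-- to_date() returns DATE, so the string constant folds to a typed DATE
-- literal; l_partkey / 1000000.0 stays a double, now printed as 1000000.0D
EXPLAIN
SELECT l_orderkey, l_partkey / 1000000.0
FROM lineitem
WHERE l_shipmode = 'RAIL'
  AND to_date(l_shipdate) = '1996-03-30';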
diff --git a/ql/src/test/results/clientpositive/fp_literal_arithmetic.q.out b/ql/src/test/results/clientpositive/fp_literal_arithmetic.q.out
index 19b078a841..e343fd6530 100644
--- a/ql/src/test/results/clientpositive/fp_literal_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/fp_literal_arithmetic.q.out
@@ -18,7 +18,7 @@ STAGE PLANS:
   alias: q0
   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: l_discount BETWEEN 0.05 AND 0.07 (type: boolean)
+    predicate: l_discount BETWEEN 0.05D AND 0.07D (type: boolean)
     Statistics: Num rows: 11 Data size: 1319 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: l_extendedprice (type: double)
@@ -84,7 +84,7 @@ STAGE PLANS:
   alias: q1
   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: l_discount BETWEEN 0.05 AND 0.07 (type: boolean)
+    predicate: l_discount BETWEEN 0.05D AND 0.07D (type: boolean)
     Statistics: Num rows: 11 Data size: 1319 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: l_extendedprice (type: double)
diff --git a/ql/src/test/results/clientpositive/gby_star.q.out b/ql/src/test/results/clientpositive/gby_star.q.out
index ade6d73f71..5c190f728e 100644
--- a/ql/src/test/results/clientpositive/gby_star.q.out
+++ b/ql/src/test/results/clientpositive/gby_star.q.out
@@ -92,7 +92,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
       aggregations: sum(key)
@@ -167,7 +167,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Group By Operator
      aggregations: sum(key)
@@ -245,7 +245,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string)
@@ -260,7 +260,7 @@ STAGE PLANS:
   alias: src
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+    predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
     Select Operator
       expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out
index 37b8c62edd..0ea39f637f 100644
--- a/ql/src/test/results/clientpositive/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: key (type: string), val (type: string), 0 (type: bigint)
+    keys: key (type: string), val (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
    Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -96,7 +96,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: key (type: string), val (type: string), 0 (type: bigint)
+    keys: key (type: string), val (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -181,7 +181,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+    keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -265,7 +265,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count(DISTINCT val)
-    keys: key (type: string), 0 (type: bigint), val (type: string)
+    keys: key (type: string), 0L (type: bigint), val (type: string)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -338,7 +338,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: key (type: string), val (type: string), 0 (type: bigint)
+    keys: key (type: string), val (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -448,7 +448,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count(DISTINCT val)
-    keys: key (type: string), 0 (type: bigint), val (type: string)
+    keys: key (type: string), 0L (type: bigint), val (type: string)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -572,7 +572,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count(1)
-    keys: key (type: string), val (type: string), 0 (type: bigint)
+    keys: key (type: string), val (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -588,7 +588,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: sum(1)
-    keys: key (type: string), val (type: string), 0 (type: bigint)
+    keys: key (type: string), val (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
index e6126ab2c6..fbc0b65e39 100644
--- a/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
+++ b/ql/src/test/results/clientpositive/groupby_cube_multi_gby.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
   outputColumnNames: key, value
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
-    keys: key (type: string), value (type: string), 0 (type: bigint)
+    keys: key (type: string), value (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2
     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -66,7 +66,7 @@ STAGE PLANS:
   outputColumnNames: key, value
   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
-    keys: key (type: string), value (type: string), 0 (type: bigint)
+    keys: key (type: string), value (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2
     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
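In the cube plans above, the synthetic grouping-set ID that Hive appends to the Group By keys is a bigint, so it now prints as 0L (and 1L where a specific set is selected, as in the grouping_id3 hunks that follow). A minimal sketch, assuming the T1(key STRING, val STRING) fixture these tests appear to use:

-- WITH CUBE adds a hidden bigint grouping-set key, rendered as
-- 0 (type: bigint) before this change and 0L after it
EXPLAIN
SELECT key, val, count(1)
FROM T1
GROUP BY key, val WITH CUBE;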
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
index ac43413400..f3f2458d25 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out
@@ -45,29 +45,29 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: key (type: int), value (type: int), 0 (type: bigint)
+    keys: key (type: int), value (type: int), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
     Filter Operator
-      predicate: (_col2 = 1) (type: boolean)
+      predicate: (_col2 = 1L) (type: boolean)
       Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
       Reduce Output Operator
-        key expressions: _col0 (type: int), _col1 (type: int), 1 (type: bigint)
+        key expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint)
         sort order: +++
-        Map-reduce partition columns: _col0 (type: int), _col1 (type: int), 1 (type: bigint)
+        Map-reduce partition columns: _col0 (type: int), _col1 (type: int), 1L (type: bigint)
         Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
         value expressions: _col3 (type: bigint)
 Reduce Operator Tree:
   Group By Operator
     aggregations: count(VALUE._col0)
-    keys: KEY._col0 (type: int), KEY._col1 (type: int), 1 (type: bigint)
+    keys: KEY._col0 (type: int), KEY._col1 (type: int), 1L (type: bigint)
     mode: mergepartial
     outputColumnNames: _col0, _col1, _col3
     Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
     pruneGroupingSetId: true
     Select Operator
-      expressions: _col0 (type: int), _col1 (type: int), 1 (type: bigint), _col3 (type: bigint)
+      expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint), _col3 (type: bigint)
       outputColumnNames: _col0, _col1, _col2, _col3
       Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
       File Output Operator
@@ -135,7 +135,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+    keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -153,10 +153,10 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2, _col3
   Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
   Filter Operator
-    predicate: (_col2 = 1) (type: boolean)
+    predicate: (_col2 = 1L) (type: boolean)
     Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
     Select Operator
-      expressions: _col0 (type: int), _col1 (type: int), 1 (type: bigint), _col3 (type: bigint)
+      expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint), _col3 (type: bigint)
      outputColumnNames: _col0, _col1, _col2, _col3
      Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
      File Output Operator
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
index 62fc9fb550..50ede0486b 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: a (type: string), b (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
@@ -137,7 +137,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: a (type: string), b (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
@@ -221,7 +221,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: a (type: string), b (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
@@ -305,7 +305,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: count()
-    keys: a (type: string), b (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE
@@ -384,7 +384,7 @@ STAGE PLANS:
   outputColumnNames: a, b, c
   Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
-    keys: a (type: string), b (type: string), c (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 3 Data size: 1080 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
index 43e17ec6bc..a41c7b8332 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
 Reduce Operator Tree:
   Group By Operator
     aggregations: count(VALUE._col0)
-    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint)
+    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
     mode: partials
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
@@ -137,7 +137,7 @@ STAGE PLANS:
 Reduce Operator Tree:
   Group By Operator
     aggregations: count(VALUE._col0)
-    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint)
+    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
     mode: partials
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
@@ -246,7 +246,7 @@ STAGE PLANS:
 Reduce Operator Tree:
   Group By Operator
     aggregations: sum(VALUE._col0)
-    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint)
+    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
     mode: partials
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
@@ -378,7 +378,7 @@ STAGE PLANS:
 Reduce Operator Tree:
   Group By Operator
     aggregations: sum(VALUE._col0)
-    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint)
+    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
     mode: partials
     outputColumnNames: _col0, _col1, _col2, _col3
     Statistics: Num rows: 24 Data size: 168 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
index 352d4beb9f..e894205a27 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
@@ -46,7 +46,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: avg(c), count()
-    keys: a (type: string), b (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3, _col4
     Statistics: Num rows: 4 Data size: 2880 Basic stats: COMPLETE Column stats: NONE
@@ -106,7 +106,7 @@ STAGE PLANS:
   Statistics: Num rows: 1 Data size: 720 Basic stats: COMPLETE Column stats: NONE
   Group By Operator
     aggregations: avg(c), count()
-    keys: a (type: string), b (type: string), 0 (type: bigint)
+    keys: a (type: string), b (type: string), 0L (type: bigint)
     mode: hash
     outputColumnNames: _col0, _col1, _col2, _col3, _col4
     Statistics: Num rows: 4 Data size: 2880 Basic stats: COMPLETE Column stats: NONE
@@ -205,7 +205,7 @@ STAGE PLANS:
 Reduce Operator Tree:
   Group By Operator
     aggregations: avg(VALUE._col0), count(VALUE._col1)
-    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint)
+    keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint)
     mode: partials
     outputColumnNames: _col0, _col1, _col2, _col3, _col4
     Statistics: Num rows: 4 Data size: 2880 Basic stats: COMPLETE Column stats: NONE
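The grouping_sets4 hunks just below combine both literal kinds in one plan: the pushed-down filter prints 3.0D while the grouping-set key prints 0L. Roughly the query shape, assuming a T1(a STRING, b STRING) fixture:

-- a < 3 becomes (UDFToDouble(a) < 3.0D) and the hidden set key becomes 0L
EXPLAIN
SELECT a, b, count(1)
FROM T1
WHERE a < 3
GROUP BY a, b GROUPING SETS ((a, b), a);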
stats: NONE @@ -189,11 +189,11 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ -266,11 +266,11 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ -367,7 +367,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -384,7 +384,7 @@ STAGE PLANS: Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ -468,7 +468,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -485,7 +485,7 @@ STAGE PLANS: Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out index 1766fb9bbc..fcd394178d 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out @@ -56,7 +56,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ 
-145,7 +145,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ -284,7 +284,7 @@ STAGE PLANS: Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out index 563b110a11..6e7c568f93 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out @@ -36,10 +36,10 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(a) = 5.0) (type: boolean) + predicate: (UDFToDouble(a) = 5.0D) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE @@ -105,10 +105,10 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(a) = 5.0) (type: boolean) + predicate: (UDFToDouble(a) = 5.0D) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out index 1f2cd456c8..9e0c9cb95b 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets_grouping.q.out @@ -40,7 +40,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -122,7 +122,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE @@ -211,7 +211,7 @@ 
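Every 0 (type: bigint) to 0L (type: bigint) change in these grouping-sets hunks is the same mechanical rewrite: the grouping-ID key that the planner appends to GROUPING SETS, CUBE, and ROLLUP aggregations is a bigint constant, and explain output now prints bigint literals with an L suffix. A minimal sketch of the query shape behind these plans, assuming the T1(a string, b string) test table used by the groupby_grouping_sets q-files:

-- sketch: the Group By Operator for this query carries the synthetic
-- grouping-ID key, rendered as 0L (type: bigint) after this change
EXPLAIN
SELECT a, b, count(*)
FROM T1
GROUP BY a, b GROUPING SETS ((a, b), a);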
STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE @@ -298,7 +298,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE @@ -407,7 +407,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -423,7 +423,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1) (type: bigint), grouping(_col2, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -489,7 +489,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE @@ -505,7 +505,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1) (type: bigint), grouping(_col2, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -578,12 +578,12 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (grouping(_col2, 1) = 1) (type: boolean) + predicate: (grouping(_col2, 1L) = 1) (type: boolean) Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: 
int), _col1 (type: int), _col2 (type: bigint) @@ -662,12 +662,12 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((grouping(_col2, 0) = 1) or (grouping(_col2, 1) = 1)) (type: boolean) + predicate: ((grouping(_col2, 0L) = 1) or (grouping(_col2, 1L) = 1)) (type: boolean) Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) @@ -681,7 +681,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1) + grouping(_col2, 0)) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1L) + grouping(_col2, 0L)) (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -696,7 +696,7 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col2 (type: bigint), CASE WHEN ((_col2 = 1)) THEN (_col0) END (type: int) + key expressions: _col2 (type: bigint), CASE WHEN ((_col2 = 1L)) THEN (_col0) END (type: int) sort order: -+ Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: int) @@ -787,7 +787,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), 0 (type: bigint), 0 (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), 0L (type: bigint), 0L (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -864,7 +864,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), 0L (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1002,7 +1002,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE @@ -1018,7 +1018,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L, 0L) (type: bigint) outputColumnNames: _col0, 
_col1, _col2, _col3 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1089,7 +1089,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE @@ -1105,7 +1105,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0, 1) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0L, 1L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1176,7 +1176,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -1192,7 +1192,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1258,7 +1258,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -1274,7 +1274,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0, 1) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0L, 1L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out index efa1802855..65159e62f1 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out @@ -37,7 +37,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: a (type: string), b (type: string), 0 (type: bigint) + 
keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ -118,7 +118,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE @@ -199,7 +199,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE @@ -279,7 +279,7 @@ STAGE PLANS: outputColumnNames: a, b, c Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: a (type: string), b (type: string), c (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 1080 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_grouping_window.q.out b/ql/src/test/results/clientpositive/groupby_grouping_window.q.out index 0f58f5182a..89330d0d29 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_window.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_window.q.out @@ -49,7 +49,7 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 20 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(live), max(comments) - keys: category (type: int), 0 (type: bigint) + keys: category (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 40 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_position.q.out b/ql/src/test/results/clientpositive/groupby_position.q.out index d959df1ab1..3cd91e0682 100644 --- a/ql/src/test/results/clientpositive/groupby_position.q.out +++ b/ql/src/test/results/clientpositive/groupby_position.q.out @@ -589,7 +589,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <= 20.0) (type: boolean) + predicate: (UDFToDouble(key) <= 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -717,7 +717,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: key (type: string), value (type: string) @@ -803,7 +803,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) 
(type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/groupby_rollup1.q.out index a849a8d368..0c622dba6e 100644 --- a/ql/src/test/results/clientpositive/groupby_rollup1.q.out +++ b/ql/src/test/results/clientpositive/groupby_rollup1.q.out @@ -37,7 +37,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -116,7 +116,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT val) - keys: key (type: string), 0 (type: bigint), val (type: string) + keys: key (type: string), 0L (type: bigint), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -189,7 +189,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -293,7 +293,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT val) - keys: key (type: string), 0 (type: bigint), val (type: string) + keys: key (type: string), 0L (type: bigint), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE @@ -417,7 +417,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE @@ -433,7 +433,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(1) - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out b/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out index dd53fe2140..8263dbd969 100644 --- a/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out +++ b/ql/src/test/results/clientpositive/groupby_rollup_empty.q.out @@ -169,7 +169,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: sum(_col2) 
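The grouping(_col2, 1) to grouping(_col2, 1L) rewrites a few hunks earlier follow from the same rule applied to function arguments: the bit positions the compiler passes to grouping() are bigint constants, so they now print with the L suffix as well, including inside filters such as (grouping(_col2, 1L) = 1). Roughly the query shape that produces those plans, assuming the T1(key int, value int) table of groupby_grouping_sets_grouping.q:

-- sketch: grouping() calls whose planned index arguments are the
-- bigint constants now shown as 1L and 0L
EXPLAIN
SELECT key, value, grouping(key), grouping(value)
FROM T1
GROUP BY key, value WITH ROLLUP;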
- keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 0 Basic stats: PARTIAL Column stats: NONE diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out index 068b201173..0982c6d2a0 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out @@ -2387,7 +2387,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -7118,7 +7118,7 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 8.0) (type: boolean) + predicate: (UDFToDouble(key) = 8.0D) (type: boolean) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out index 4ccfbb83c9..4ddc74bb6a 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out @@ -2526,7 +2526,7 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -7627,7 +7627,7 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 8.0) (type: boolean) + predicate: (UDFToDouble(key) = 8.0D) (type: boolean) Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) diff --git a/ql/src/test/results/clientpositive/having.q.out b/ql/src/test/results/clientpositive/having.q.out index 8a630751bb..c6f34c9158 100644 --- a/ql/src/test/results/clientpositive/having.q.out +++ b/ql/src/test/results/clientpositive/having.q.out @@ -41,7 +41,7 @@ STAGE PLANS: outputColumnNames: _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 3) (type: boolean) + predicate: (_col1 > 3L) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: bigint) @@ -95,7 +95,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <> 302.0) (type: boolean) + predicate: (UDFToDouble(key) <> 302.0D) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(value) @@ -728,7 +728,7 @@ 
STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 300.0) (type: boolean) + predicate: (UDFToDouble(key) > 300.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(value) @@ -1204,7 +1204,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 >= 4) (type: boolean) + predicate: (_col1 >= 4L) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/having2.q.out b/ql/src/test/results/clientpositive/having2.q.out index 67f8af8328..12fae67586 100644 --- a/ql/src/test/results/clientpositive/having2.q.out +++ b/ql/src/test/results/clientpositive/having2.q.out @@ -155,7 +155,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col1 <= 4074689.000000041) and (_col3 <= 822)) (type: boolean) + predicate: ((_col1 <= 4074689.000000041D) and (_col3 <= 822L)) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double), _col2 (type: double) @@ -222,7 +222,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col1 <= 4074689.000000041) and (_col3 <= 822)) (type: boolean) + predicate: ((_col1 <= 4074689.000000041D) and (_col3 <= 822L)) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double), _col2 (type: double) @@ -341,7 +341,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 275 Data size: 2921 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col1 <= 4074689.000000041) and (_col2 <= 822.0) and (_col3 > 4)) (type: boolean) + predicate: ((_col1 <= 4074689.000000041D) and (_col2 <= 822.0D) and (_col3 > 4L)) (type: boolean) Statistics: Num rows: 10 Data size: 106 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -464,7 +464,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col3, _col4 Statistics: Num rows: 275 Data size: 2921 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col2 <= 4074689.000000041) and (_col3 <= 822.0) and (_col4 > 4)) (type: boolean) + predicate: ((_col2 <= 4074689.000000041D) and (_col3 <= 822.0D) and (_col4 > 4L)) (type: boolean) Statistics: Num rows: 10 Data size: 106 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col1 (type: string) @@ -587,7 +587,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col3, _col4 Statistics: Num rows: 275 Data size: 2921 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col2 <= 4074689.000000041) and (_col3 <= 822.0) and (_col4 > 4)) (type: boolean) + predicate: ((_col2 <= 4074689.000000041D) and (_col3 <= 822.0D) and (_col4 > 4L)) (type: boolean) Statistics: Num rows: 10 Data size: 106 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col1 (type: string), _col1 (type: string) diff --git 
a/ql/src/test/results/clientpositive/implicit_cast1.q.out b/ql/src/test/results/clientpositive/implicit_cast1.q.out index 6e1706a8a3..1e62fe3087 100644 --- a/ql/src/test/results/clientpositive/implicit_cast1.q.out +++ b/ql/src/test/results/clientpositive/implicit_cast1.q.out @@ -28,7 +28,7 @@ STAGE PLANS: alias: implicit_test1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (a <> 0) (type: boolean) + predicate: (a <> 0L) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: a (type: bigint), b (type: string) diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out index 80e14555da..b1250d3eb2 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out @@ -452,7 +452,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 99 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), UDFToString(_col1) (type: string), if(((UDFToDouble(_col0) % 100.0) = 0.0), '11', '12') (type: string) + expressions: _col0 (type: string), UDFToString(_col1) (type: string), if(((UDFToDouble(_col0) % 100.0D) = 0.0D), '11', '12') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 99 Data size: 58120 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out index 72249383ee..701ba2b166 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out @@ -39,7 +39,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: key (type: string), value (type: string), 0 (type: bigint) + keys: key (type: string), value (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE @@ -1518,7 +1518,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: key (type: string), value (type: string), 0 (type: bigint) + keys: key (type: string), value (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE @@ -1743,7 +1743,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: key (type: string), value (type: string), 0 (type: bigint) + keys: key (type: string), value (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/infer_const_type.q.out b/ql/src/test/results/clientpositive/infer_const_type.q.out index 647c910241..ed9a2aaa1c 100644 --- a/ql/src/test/results/clientpositive/infer_const_type.q.out +++ b/ql/src/test/results/clientpositive/infer_const_type.q.out @@ -59,10 +59,10 @@ 
STAGE PLANS: alias: infertypes Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(str) = 1234.0) and (bi = -12345) and (db = -307.0) and (fl = 906) and (i = 12345) and (si = 32767) and (ti = 127)) (type: boolean) + predicate: ((UDFToDouble(str) = 1234.0D) and (bi = -12345L) and (db = -307.0D) and (fl = 906) and (i = 12345) and (si = 32767S) and (ti = 127Y)) (type: boolean) Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 127 (type: tinyint), 32767 (type: smallint), 12345 (type: int), -12345 (type: bigint), 906.0 (type: float), -307.0 (type: double), str (type: string) + expressions: 127Y (type: tinyint), 32767S (type: smallint), 12345 (type: int), -12345L (type: bigint), 906.0 (type: float), -307.0D (type: double), str (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -131,7 +131,7 @@ STAGE PLANS: alias: infertypes Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(bi) = 9.223372036854776E18) or (UDFToDouble(i) = 2.147483648E9) or (UDFToDouble(ti) = 128.0) or (UDFToInteger(si) = 32768)) (type: boolean) + predicate: ((UDFToDouble(bi) = 9.223372036854776E18D) or (UDFToDouble(i) = 2.147483648E9D) or (UDFToDouble(ti) = 128.0D) or (UDFToInteger(si) = 32768)) (type: boolean) Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string) @@ -194,7 +194,7 @@ STAGE PLANS: alias: infertypes Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((CAST( si AS decimal(5,0)) = 327) or (UDFToDouble(i) = -100.0) or (UDFToDouble(ti) = 127.0)) (type: boolean) + predicate: ((CAST( si AS decimal(5,0)) = 327) or (UDFToDouble(i) = -100.0D) or (UDFToDouble(ti) = 127.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string) @@ -251,7 +251,7 @@ STAGE PLANS: alias: infertypes Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(i) > 100.0) and (UDFToDouble(str) = 1.57) and (UDFToDouble(ti) < 127.0)) (type: boolean) + predicate: ((UDFToDouble(i) > 100.0D) and (UDFToDouble(str) = 1.57D) and (UDFToDouble(ti) < 127.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 1170 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string) diff --git a/ql/src/test/results/clientpositive/input11.q.out b/ql/src/test/results/clientpositive/input11.q.out index bb59b4f992..1e98764506 100644 --- a/ql/src/test/results/clientpositive/input11.q.out +++ b/ql/src/test/results/clientpositive/input11.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) 
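The infer_const_type hunk just above is the most complete summary of the new literal spelling: tinyint constants get a Y suffix (127Y), smallint S (32767S), bigint L (-12345L), and double D (-307.0D), while int (12345) and float (906.0) stay bare. It also shows why so many predicates in this patch read UDFToDouble(col) compared against a D-suffixed constant: a string column compared with a number is planned as a double comparison on both sides. A minimal sketch, assuming the standard src(key string, value string) test table:

-- sketch: numeric comparison against a string column; the planner
-- wraps the column in UDFToDouble and prints the constant as 100.0D
EXPLAIN
SELECT key, value
FROM src
WHERE key < 100;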
Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(key) (type: int), value (type: string) diff --git a/ql/src/test/results/clientpositive/input11_limit.q.out b/ql/src/test/results/clientpositive/input11_limit.q.out index 8fb7b630fc..64d5d4f61c 100644 --- a/ql/src/test/results/clientpositive/input11_limit.q.out +++ b/ql/src/test/results/clientpositive/input11_limit.q.out @@ -27,7 +27,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/input2_limit.q.out b/ql/src/test/results/clientpositive/input2_limit.q.out index 1404349aed..c11e717528 100644 --- a/ql/src/test/results/clientpositive/input2_limit.q.out +++ b/ql/src/test/results/clientpositive/input2_limit.q.out @@ -16,7 +16,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 300.0) (type: boolean) + predicate: (UDFToDouble(key) < 300.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/input42.q.out b/ql/src/test/results/clientpositive/input42.q.out index 7b9e63943e..07dcdfaa17 100644 --- a/ql/src/test/results/clientpositive/input42.q.out +++ b/ql/src/test/results/clientpositive/input42.q.out @@ -1240,7 +1240,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 200.0) (type: boolean) + predicate: (UDFToDouble(key) < 200.0D) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) @@ -1751,7 +1751,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (rand(100) < 0.1) (type: boolean) + predicate: (rand(100) < 0.1D) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) diff --git a/ql/src/test/results/clientpositive/input_part1.q.out b/ql/src/test/results/clientpositive/input_part1.q.out index 31ce924066..31f45c290f 100644 --- a/ql/src/test/results/clientpositive/input_part1.q.out +++ b/ql/src/test/results/clientpositive/input_part1.q.out @@ -34,7 +34,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(key) (type: int), value (type: string), '12' (type: string), '2008-04-08' (type: string) diff --git a/ql/src/test/results/clientpositive/input_part4.q.out b/ql/src/test/results/clientpositive/input_part4.q.out index 753e268c54..fd09911f3d 100644 --- a/ql/src/test/results/clientpositive/input_part4.q.out +++ 
b/ql/src/test/results/clientpositive/input_part4.q.out @@ -16,7 +16,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hr) = 15.0) and (ds = '2008-04-08')) (type: boolean) + predicate: ((UDFToDouble(hr) = 15.0D) and (ds = '2008-04-08')) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) diff --git a/ql/src/test/results/clientpositive/input_part5.q.out b/ql/src/test/results/clientpositive/input_part5.q.out index 33ecd59709..0a5be8c66b 100644 --- a/ql/src/test/results/clientpositive/input_part5.q.out +++ b/ql/src/test/results/clientpositive/input_part5.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string) diff --git a/ql/src/test/results/clientpositive/interval_alt.q.out b/ql/src/test/results/clientpositive/interval_alt.q.out index 53c1be8d05..601504c1f3 100644 --- a/ql/src/test/results/clientpositive/interval_alt.q.out +++ b/ql/src/test/results/clientpositive/interval_alt.q.out @@ -139,7 +139,7 @@ STAGE PLANS: alias: t Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (2012-01-01 + IntervalDayLiteralProcessor(((- dt) * dt))) (type: timestamp), (2012-01-01 - IntervalDayLiteralProcessor(((- dt) * dt))) (type: timestamp), 2012-01-04 00:00:00.0 (type: timestamp), (2012-01-01 + IntervalYearMonthLiteralProcessor(concat(dt, '-1'))) (type: date) + expressions: (DATE'2012-01-01' + IntervalDayLiteralProcessor(((- dt) * dt))) (type: timestamp), (DATE'2012-01-01' - IntervalDayLiteralProcessor(((- dt) * dt))) (type: timestamp), TIMESTAMP'2012-01-04 00:00:00.0' (type: timestamp), (DATE'2012-01-01' + IntervalYearMonthLiteralProcessor(concat(dt, '-1'))) (type: date) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/interval_arithmetic.q.out b/ql/src/test/results/clientpositive/interval_arithmetic.q.out index eba97face9..c7eadea7d2 100644 --- a/ql/src/test/results/clientpositive/interval_arithmetic.q.out +++ b/ql/src/test/results/clientpositive/interval_arithmetic.q.out @@ -54,7 +54,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: dateval (type: date), (dateval - 2-2) (type: date), (dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + -2-2) (type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date) + expressions: dateval (type: date), (dateval - INTERVAL'2-2') (type: date), (dateval - INTERVAL'-2-2') (type: date), (dateval + INTERVAL'2-2') (type: date), (dateval + INTERVAL'-2-2') (type: date), (INTERVAL'-2-2' + dateval) (type: date), (INTERVAL'2-2' + dateval) (type: date) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column 
stats: NONE Limit @@ -132,7 +132,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: dateval (type: date), (dateval - 1999-06-07) (type: interval_day_time), (1999-06-07 - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time) + expressions: dateval (type: date), (dateval - DATE'1999-06-07') (type: interval_day_time), (DATE'1999-06-07' - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Limit @@ -210,7 +210,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: tsval (type: timestamp), (tsval - 2-2) (type: timestamp), (tsval - -2-2) (type: timestamp), (tsval + 2-2) (type: timestamp), (tsval + -2-2) (type: timestamp), (-2-2 + tsval) (type: timestamp), (2-2 + tsval) (type: timestamp) + expressions: tsval (type: timestamp), (tsval - INTERVAL'2-2') (type: timestamp), (tsval - INTERVAL'-2-2') (type: timestamp), (tsval + INTERVAL'2-2') (type: timestamp), (tsval + INTERVAL'-2-2') (type: timestamp), (INTERVAL'-2-2' + tsval) (type: timestamp), (INTERVAL'2-2' + tsval) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Limit @@ -284,7 +284,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month) + expressions: INTERVAL'5-5' (type: interval_year_month), INTERVAL'-1-1' (type: interval_year_month) outputColumnNames: _col0, _col1 Statistics: Num rows: 12288 Data size: 196608 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -346,7 +346,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: dateval (type: date), (dateval - 99 11:22:33.123456789) (type: timestamp), (dateval - -99 11:22:33.123456789) (type: timestamp), (dateval + 99 11:22:33.123456789) (type: timestamp), (dateval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + dateval) (type: timestamp), (99 11:22:33.123456789 + dateval) (type: timestamp) + expressions: dateval (type: date), (dateval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + dateval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + dateval) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Limit @@ -506,7 +506,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: tsval (type: timestamp), (tsval - 99 11:22:33.123456789) (type: timestamp), (tsval - -99 11:22:33.123456789) (type: timestamp), (tsval + 99 11:22:33.123456789) (type: timestamp), (tsval + -99 11:22:33.123456789) (type: timestamp), 
(-99 11:22:33.123456789 + tsval) (type: timestamp), (99 11:22:33.123456789 + tsval) (type: timestamp) + expressions: tsval (type: timestamp), (tsval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + tsval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + tsval) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: NONE Limit @@ -580,7 +580,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 109 20:30:40.246913578 (type: interval_day_time), 89 02:14:26.000000000 (type: interval_day_time) + expressions: INTERVAL'109 20:30:40.246913578' (type: interval_day_time), INTERVAL'89 02:14:26.000000000' (type: interval_day_time) outputColumnNames: _col0, _col1 Statistics: Num rows: 12288 Data size: 294912 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -624,7 +624,7 @@ STAGE PLANS: alias: interval_arithmetic_1 Statistics: Num rows: 12288 Data size: 326837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2016-11-11 03:04:00.0 (type: timestamp) + expressions: TIMESTAMP'2016-11-11 03:04:00.0' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 12288 Data size: 491520 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/ivyDownload.q.out b/ql/src/test/results/clientpositive/ivyDownload.q.out index 6bc9cb178e..eb94712f27 100644 --- a/ql/src/test/results/clientpositive/ivyDownload.q.out +++ b/ql/src/test/results/clientpositive/ivyDownload.q.out @@ -36,7 +36,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double) + expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003D (type: double), 6.6D (type: double), 11.0D (type: double), 10.4D (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/join11.q.out b/ql/src/test/results/clientpositive/join11.q.out index 4b5e4d0da8..aa717dfab3 100644 --- a/ql/src/test/results/clientpositive/join11.q.out +++ b/ql/src/test/results/clientpositive/join11.q.out @@ -26,7 +26,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -41,7 +41,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 
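The interval_alt and interval_arithmetic hunks above extend the same idea from numeric to temporal constants: date, timestamp, and interval values embedded in a plan are now printed with an explicit type prefix (DATE'2012-01-01', TIMESTAMP'2016-11-11 03:04:00.0', INTERVAL'2-2') instead of as bare values that were easy to misread as arithmetic. A sketch of the date-plus-interval arithmetic those plans come from, assuming the interval_arithmetic_1 table with a dateval date column:

-- sketch: date +/- year-month interval; the plan now renders the
-- constant operand as INTERVAL'2-2' rather than a bare 2-2
EXPLAIN
SELECT dateval,
       dateval - INTERVAL '2-2' YEAR TO MONTH,
       dateval + INTERVAL '2-2' YEAR TO MONTH
FROM interval_arithmetic_1;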
value (type: string) diff --git a/ql/src/test/results/clientpositive/join12.q.out b/ql/src/test/results/clientpositive/join12.q.out index f79e27222a..29e3b26858 100644 --- a/ql/src/test/results/clientpositive/join12.q.out +++ b/ql/src/test/results/clientpositive/join12.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 80.0) (type: boolean) + predicate: (UDFToDouble(key) < 80.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -47,7 +47,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 80.0) (type: boolean) + predicate: (UDFToDouble(key) < 80.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -62,7 +62,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 80.0) (type: boolean) + predicate: (UDFToDouble(key) < 80.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join13.q.out b/ql/src/test/results/clientpositive/join13.q.out index f562d69cc6..17c8ca765d 100644 --- a/ql/src/test/results/clientpositive/join13.q.out +++ b/ql/src/test/results/clientpositive/join13.q.out @@ -33,7 +33,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -49,7 +49,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -90,7 +90,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 200.0) (type: boolean) + predicate: (UDFToDouble(key) < 200.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/join14.q.out b/ql/src/test/results/clientpositive/join14.q.out index cb456e23d7..1352237ffb 100644 --- a/ql/src/test/results/clientpositive/join14.q.out +++ b/ql/src/test/results/clientpositive/join14.q.out @@ -28,7 +28,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -43,7 +43,7 @@ STAGE PLANS: 
alias: srcpart Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join16.q.out b/ql/src/test/results/clientpositive/join16.q.out index 39c640819e..cb1d572a1c 100644 --- a/ql/src/test/results/clientpositive/join16.q.out +++ b/ql/src/test/results/clientpositive/join16.q.out @@ -14,7 +14,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean) + predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -29,7 +29,7 @@ STAGE PLANS: alias: tab Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean) + predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join34.q.out b/ql/src/test/results/clientpositive/join34.q.out index 7675ad9870..a28d7606ad 100644 --- a/ql/src/test/results/clientpositive/join34.q.out +++ b/ql/src/test/results/clientpositive/join34.q.out @@ -48,7 +48,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean) + predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean) Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -69,7 +69,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 20.0) (type: boolean) + predicate: (UDFToDouble(key) < 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -155,7 +155,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join35.q.out b/ql/src/test/results/clientpositive/join35.q.out index 4590ef946d..4a5b10d7af 100644 --- a/ql/src/test/results/clientpositive/join35.q.out +++ b/ql/src/test/results/clientpositive/join35.q.out @@ -45,7 +45,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 20.0) (type: boolean) + predicate: (UDFToDouble(key) < 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By 
Operator aggregations: count() @@ -154,7 +154,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean) + predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean) Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -535,7 +535,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() diff --git a/ql/src/test/results/clientpositive/join38.q.out b/ql/src/test/results/clientpositive/join38.q.out index 2857de35cc..1ae4339f04 100644 --- a/ql/src/test/results/clientpositive/join38.q.out +++ b/ql/src/test/results/clientpositive/join38.q.out @@ -66,7 +66,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 2 Data size: 126 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(col11) = 111.0) (type: boolean) + predicate: (UDFToDouble(col11) = 111.0D) (type: boolean) Statistics: Num rows: 1 Data size: 63 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: col5 (type: string), col11 (type: string) @@ -84,7 +84,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 111.0) (type: boolean) + predicate: (UDFToDouble(key) = 111.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join39.q.out b/ql/src/test/results/clientpositive/join39.q.out index 80272a4caf..f3b4a6dae4 100644 --- a/ql/src/test/results/clientpositive/join39.q.out +++ b/ql/src/test/results/clientpositive/join39.q.out @@ -36,7 +36,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <= 100.0) (type: boolean) + predicate: (UDFToDouble(key) <= 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join4.q.out b/ql/src/test/results/clientpositive/join4.q.out index 95a4dbfac6..7204250b5e 100644 --- a/ql/src/test/results/clientpositive/join4.q.out +++ b/ql/src/test/results/clientpositive/join4.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -66,7 +66,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and 
(UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join40.q.out b/ql/src/test/results/clientpositive/join40.q.out index e920bbbee4..9d699c85be 100644 --- a/ql/src/test/results/clientpositive/join40.q.out +++ b/ql/src/test/results/clientpositive/join40.q.out @@ -29,7 +29,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <= 100.0) (type: boolean) + predicate: (UDFToDouble(key) <= 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -3098,7 +3098,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <= 100.0) (type: boolean) + predicate: (UDFToDouble(key) <= 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join41.q.out b/ql/src/test/results/clientpositive/join41.q.out index 2ea4912d90..50ce9f6afb 100644 --- a/ql/src/test/results/clientpositive/join41.q.out +++ b/ql/src/test/results/clientpositive/join41.q.out @@ -41,7 +41,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 10.0) (type: boolean) + predicate: (UDFToDouble(key) > 10.0D) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -118,7 +118,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 10.0) (type: boolean) + predicate: (UDFToDouble(key) > 10.0D) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join45.q.out b/ql/src/test/results/clientpositive/join45.q.out index 3d49a24431..f2cca31ebb 100644 --- a/ql/src/test/results/clientpositive/join45.q.out +++ b/ql/src/test/results/clientpositive/join45.q.out @@ -26,7 +26,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(value) BETWEEN 100.0 AND 102.0 and key is not null) (type: boolean) + predicate: (UDFToDouble(value) BETWEEN 100.0D AND 102.0D and key is not null) (type: boolean) Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -42,7 +42,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(value) BETWEEN 100.0 AND 102.0 and key is not null) (type: boolean) + predicate: (UDFToDouble(value) BETWEEN 100.0D AND 102.0D and key is not null) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -233,7 
+233,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: UDFToDouble(value) BETWEEN 100.0 AND 102.0 (type: boolean) + predicate: UDFToDouble(value) BETWEEN 100.0D AND 102.0D (type: boolean) Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -247,7 +247,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: UDFToDouble(value) BETWEEN 100.0 AND 102.0 (type: boolean) + predicate: UDFToDouble(value) BETWEEN 100.0D AND 102.0D (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -361,7 +361,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col0 = _col2) or UDFToDouble(_col1) BETWEEN 100.0 AND 102.0 or UDFToDouble(_col3) BETWEEN 100.0 AND 102.0) (type: boolean) + predicate: ((_col0 = _col2) or UDFToDouble(_col1) BETWEEN 100.0D AND 102.0D or UDFToDouble(_col3) BETWEEN 100.0D AND 102.0D) (type: boolean) Statistics: Num rows: 9026 Data size: 173876 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -466,7 +466,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0) and ((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0)) (type: boolean) + predicate: (((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0D) and ((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0D)) (type: boolean) Statistics: Num rows: 1388 Data size: 26738 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -565,7 +565,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0) or ((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0)) (type: boolean) + predicate: (((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0D) or ((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0D)) (type: boolean) Statistics: Num rows: 8332 Data size: 160507 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -797,7 +797,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 13750 Data size: 264875 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) >= 100.0) (type: boolean) + predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) >= 100.0D) (type: boolean) Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col2 (type: string), _col3 (type: string) @@ -944,7 +944,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 13750 Data size: 264875 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0) (type: boolean) + predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D) (type: 
boolean) Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -1048,7 +1048,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0) (type: boolean) + predicate: ((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0D) (type: boolean) Statistics: Num rows: 4166 Data size: 80253 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1230,7 +1230,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 13750 Data size: 264875 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0) (type: boolean) + predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D) (type: boolean) Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -1324,7 +1324,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12500 Data size: 240800 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0) (type: boolean) + predicate: ((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0D) (type: boolean) Statistics: Num rows: 4166 Data size: 80253 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1506,7 +1506,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 13750 Data size: 264875 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0) (type: boolean) + predicate: ((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D) (type: boolean) Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 diff --git a/ql/src/test/results/clientpositive/join47.q.out b/ql/src/test/results/clientpositive/join47.q.out index 20dce5d3bb..4a13df86d0 100644 --- a/ql/src/test/results/clientpositive/join47.q.out +++ b/ql/src/test/results/clientpositive/join47.q.out @@ -26,7 +26,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(value) BETWEEN 100.0 AND 102.0 and key is not null) (type: boolean) + predicate: (UDFToDouble(value) BETWEEN 100.0D AND 102.0D and key is not null) (type: boolean) Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -42,7 +42,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(value) BETWEEN 100.0 AND 102.0 and key is not null) (type: boolean) + predicate: (UDFToDouble(value) BETWEEN 100.0D AND 102.0D and key is not null) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -233,7 +233,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: UDFToDouble(value) BETWEEN 100.0 AND 102.0 (type: boolean) + predicate: UDFToDouble(value) BETWEEN 100.0D AND 102.0D (type: boolean) Statistics: Num rows: 2 Data size: 15 
Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -247,7 +247,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: UDFToDouble(value) BETWEEN 100.0 AND 102.0 (type: boolean) + predicate: UDFToDouble(value) BETWEEN 100.0D AND 102.0D (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -359,7 +359,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 - residual filter predicates: {((_col0 = _col2) or UDFToDouble(_col1) BETWEEN 100.0 AND 102.0 or UDFToDouble(_col3) BETWEEN 100.0 AND 102.0)} + residual filter predicates: {((_col0 = _col2) or UDFToDouble(_col1) BETWEEN 100.0D AND 102.0D or UDFToDouble(_col3) BETWEEN 100.0D AND 102.0D)} Statistics: Num rows: 9026 Data size: 173876 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -462,7 +462,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 - residual filter predicates: {((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0)} {((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0)} + residual filter predicates: {((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0D)} {((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0D)} Statistics: Num rows: 1388 Data size: 26738 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -559,7 +559,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 - residual filter predicates: {(((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0) or ((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0))} + residual filter predicates: {(((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0D) or ((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0D))} Statistics: Num rows: 8332 Data size: 160507 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -787,7 +787,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) >= 100.0)} + residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) >= 100.0D)} Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col2 (type: string), _col3 (type: string) @@ -932,7 +932,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0)} + residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D)} Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -1034,7 +1034,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 - residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0)} + residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0D)} Statistics: Num rows: 4166 Data size: 80253 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1214,7 +1214,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0)} + residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D)} Statistics: Num rows: 
4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 @@ -1306,7 +1306,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 - residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0)} + residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0D)} Statistics: Num rows: 4166 Data size: 80253 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1486,7 +1486,7 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0)} + residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D)} Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 diff --git a/ql/src/test/results/clientpositive/join5.q.out b/ql/src/test/results/clientpositive/join5.q.out index 7ce3b34a2d..87c9efa5fb 100644 --- a/ql/src/test/results/clientpositive/join5.q.out +++ b/ql/src/test/results/clientpositive/join5.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -66,7 +66,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join6.q.out b/ql/src/test/results/clientpositive/join6.q.out index 989ae2a512..978cc45963 100644 --- a/ql/src/test/results/clientpositive/join6.q.out +++ b/ql/src/test/results/clientpositive/join6.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -66,7 +66,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join7.q.out b/ql/src/test/results/clientpositive/join7.q.out index f34df53d83..91c4eb7557 100644 --- a/ql/src/test/results/clientpositive/join7.q.out +++ b/ql/src/test/results/clientpositive/join7.q.out @@ -60,7 +60,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data 
size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -76,7 +76,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -92,7 +92,7 @@ STAGE PLANS: alias: src3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out index bfe4fd8411..346a6a38b0 100644 --- a/ql/src/test/results/clientpositive/join8.q.out +++ b/ql/src/test/results/clientpositive/join8.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -66,7 +66,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/join_vc.q.out b/ql/src/test/results/clientpositive/join_vc.q.out index 62671f2cfd..963f4eb7f5 100644 --- a/ql/src/test/results/clientpositive/join_vc.q.out +++ b/ql/src/test/results/clientpositive/join_vc.q.out @@ -167,7 +167,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -182,7 +182,7 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), 
BLOCK__OFFSET__INSIDE__FILE (type: bigint) diff --git a/ql/src/test/results/clientpositive/limit_pushdown2.q.out b/ql/src/test/results/clientpositive/limit_pushdown2.q.out index f4cff2bcc0..5aeb5213e5 100644 --- a/ql/src/test/results/clientpositive/limit_pushdown2.q.out +++ b/ql/src/test/results/clientpositive/limit_pushdown2.q.out @@ -20,7 +20,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -114,7 +114,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -208,7 +208,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -302,7 +302,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -396,7 +396,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -490,7 +490,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -585,7 +585,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) 
outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -701,7 +701,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -817,7 +817,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -932,12 +932,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE @@ -1021,12 +1021,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out index 7bcde29a90..b46eb5cf5a 100644 --- a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out +++ b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out @@ -107,7 +107,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 100.0) (type: boolean) + predicate: (_col1 > 100.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out index 5b625f0d41..40011a76cc 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out +++ 
b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out @@ -36,7 +36,7 @@ STAGE PLANS: Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string) + expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -401,7 +401,7 @@ STAGE PLANS: Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string) + expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out index 4160ad60b7..83d6c3bf11 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out @@ -36,7 +36,7 @@ STAGE PLANS: Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string) + expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -401,7 +401,7 @@ STAGE PLANS: Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string) + expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out index 7e9aad484d..22096dc591 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out @@ -36,7 +36,7 @@ STAGE PLANS: Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string) + expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/literal_double.q.out b/ql/src/test/results/clientpositive/literal_double.q.out index c127d68ed6..76236ca06b 100644 --- 
a/ql/src/test/results/clientpositive/literal_double.q.out +++ b/ql/src/test/results/clientpositive/literal_double.q.out @@ -14,7 +14,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 3.14E8 (type: double), 3.14E-8 (type: double), -3.14E8 (type: double), -3.14E-8 (type: double), 3.14E8 (type: double), 3.14E8 (type: double), 3.14E-8 (type: double) + expressions: 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 3.14E8D (type: double), 3.14E-8D (type: double), -3.14E8D (type: double), -3.14E-8D (type: double), 3.14E8D (type: double), 3.14E8D (type: double), 3.14E-8D (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 500 Data size: 140000 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/literal_ints.q.out b/ql/src/test/results/clientpositive/literal_ints.q.out index 9a56ebd5eb..a3230b94c9 100644 --- a/ql/src/test/results/clientpositive/literal_ints.q.out +++ b/ql/src/test/results/clientpositive/literal_ints.q.out @@ -14,7 +14,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 100 (type: int), 100 (type: tinyint), 100 (type: smallint), 100 (type: bigint) + expressions: 100 (type: int), 100Y (type: tinyint), 100S (type: smallint), 100L (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 10000 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out index 68801f0294..11a99dbc33 100644 --- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out +++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out @@ -428,10 +428,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363) and (t < 100)) (type: boolean) + filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2098 Data size: 41920 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b = 4294967363) and (t < 100)) (type: boolean) + predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), i (type: int) @@ -498,10 +498,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363) and (t < 100)) (type: boolean) + filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2098 Data size: 41920 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b = 4294967363) and (t < 100)) (type: boolean) + predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), t (type: tinyint), si (type: smallint), i (type: int) @@ -571,10 +571,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363) and (t < 100)) (type: boolean) + filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2098 Data size: 706986 
Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b = 4294967363) and (t < 100)) (type: boolean) + predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2 Data size: 674 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), t (type: tinyint), si (type: smallint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary) @@ -592,7 +592,7 @@ STAGE PLANS: Execution mode: vectorized, llap Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), 0 (type: int), 4294967363 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: boolean), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: decimal(4,2)), VALUE._col9 (type: binary) + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), 0 (type: int), 4294967363L (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: boolean), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: decimal(4,2)), VALUE._col9 (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 2 Data size: 834 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -694,7 +694,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/llap/column_access_stats.q.out b/ql/src/test/results/clientpositive/llap/column_access_stats.q.out index 7929645566..5788193a4a 100644 --- a/ql/src/test/results/clientpositive/llap/column_access_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/column_access_stats.q.out @@ -520,7 +520,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(val) = 3.0) and key is not null) (type: boolean) + predicate: ((UDFToDouble(val) = 3.0D) and key is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), val (type: string) @@ -540,7 +540,7 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 5 Data size: 850 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(val) = 3.0) and key is not null) (type: boolean) + predicate: ((UDFToDouble(val) = 3.0D) and key is not null) (type: boolean) Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string), val (type: string) @@ -622,7 +622,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) = 5.0) and val is not null) (type: boolean) + predicate: ((UDFToDouble(key) = 5.0D) and val is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: val (type: string) @@ -641,7 
+641,7 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 5 Data size: 850 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(key) = 6.0) and val is not null) (type: boolean) + predicate: ((UDFToDouble(key) = 6.0D) and val is not null) (type: boolean) Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: val (type: string) diff --git a/ql/src/test/results/clientpositive/llap/cte_5.q.out b/ql/src/test/results/clientpositive/llap/cte_5.q.out index 17a86d3e6c..76ba82961f 100644 --- a/ql/src/test/results/clientpositive/llap/cte_5.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_5.q.out @@ -95,7 +95,7 @@ Stage-0 XPROD_EDGE [RS_6] Select Operator [SEL_2] (rows=1 width=4) Filter Operator [FIL_11] (rows=1 width=4) - predicate:(UDFToDouble(colnum) = 5.0) + predicate:(UDFToDouble(colnum) = 5.0D) TableScan [TS_0] (rows=1 width=4) mydb@q1,a,Tbl:COMPLETE,Col:COMPLETE,Output:["colnum"] <-Map 3 [XPROD_EDGE] llap diff --git a/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out b/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out index 98ede4e995..0052f5868c 100644 --- a/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out +++ b/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out @@ -55,7 +55,7 @@ STAGE PLANS: alias: alltypesorc GatherStats: false Select Operator - expressions: 2012-01-01 01:02:03.0 (type: timestamp) + expressions: TIMESTAMP'2012-01-01 01:02:03.0' (type: timestamp) outputColumnNames: _col0 ListSink diff --git a/ql/src/test/results/clientpositive/llap/default_constraint.q.out b/ql/src/test/results/clientpositive/llap/default_constraint.q.out new file mode 100644 index 0000000000..14790b7636 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/default_constraint.q.out @@ -0,0 +1,2793 @@ +PREHOOK: query: CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@numericDataType +POSTHOOK: query: CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@numericDataType +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat 
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [a] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +Constraint Name: tinyint_constraint +Column Name:a Default Value:127Y + +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(a) values(3Y) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(a) values(3Y) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(3)) (type: array<struct<col1:int>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), 32767S (type: smallint), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), 1234567.89 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: UDFToByte(_col0) (type: tinyint) + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: double), _col5 (type: decimal(9,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: UDFToByte(VALUE._col0) (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde:
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(a) values(3Y) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(a) values(3Y) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SCRIPT [] +POSTHOOK: Lineage: numericdatatype.b SIMPLE [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SIMPLE [] +PREHOOK: query: SELECT * FROM numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +3 32767 2147483647 9223372036854775807 3.4E38 1234567.89 +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(e,f) values(4.5, 678.4) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(e,f) values(4.5, 678.4) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(4.5,678.4)) (type: array<struct<col1:decimal(2,1),col2:decimal(4,1)>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: 127Y (type: tinyint), 32767S (type: smallint), 2147483647 (type: int), 9223372036854775807L (type: bigint), col1 (type: decimal(2,1)), col2 (type: decimal(4,1)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: decimal(2,1)), _col5 (type: decimal(4,1)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), UDFToDouble(VALUE._col4) (type: double), CAST( VALUE._col5 AS decimal(9,2)) (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde:
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(e,f) values(4.5, 678.4) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(e,f) values(4.5, 678.4) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SIMPLE [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SCRIPT [] +POSTHOOK: Lineage: numericdatatype.f SCRIPT [] +PREHOOK: query: SELECT * FROM numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +3 32767 2147483647 9223372036854775807 3.4E38 1234567.89 +127 32767 2147483647 9223372036854775807 4.5 678.40 +PREHOOK: query: DROP TABLE numericDataType +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@numericdatatype +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: DROP TABLE numericDataType +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@numericdatatype +POSTHOOK: Output: default@numericdatatype +PREHOOK: query: -- Date/time +CREATE TABLE table1(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000', + tz timestamp with local time zone DEFAULT TIMESTAMPLOCALTZ'2016-01-03 12:26:34 America/Los_Angeles', + d1 DATE DEFAULT current_date() ENABLE, t1 TIMESTAMP DEFAULT current_timestamp() DISABLE) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table1 +POSTHOOK: query: -- Date/time +CREATE TABLE table1(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000', + tz timestamp with local time zone DEFAULT TIMESTAMPLOCALTZ'2016-01-03 12:26:34 America/Los_Angeles', + d1 DATE DEFAULT current_date() ENABLE, t1 TIMESTAMP DEFAULT current_timestamp() DISABLE) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table1 +PREHOOK: query: DESC FORMATTED table1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@table1 +POSTHOOK: query: DESC FORMATTED table1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@table1 +# col_name data_type comment +d date +t timestamp +tz timestamp with local time zone('US/Pacific') +d1 date +t1 timestamp + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + 
serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.table1 +Constraint Name: #### A masked pattern was here #### +Column Name:d1 Default Value:CURRENT_DATE() + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:DATE'2018-02-14' + +Constraint Name: #### A masked pattern was here #### +Column Name:t Default Value:TIMESTAMP'2016-02-22 12:45:07.0' + +Constraint Name: #### A masked pattern was here #### +Column Name:t1 Default Value:CURRENT_TIMESTAMP() + +Constraint Name: #### A masked pattern was here #### +Column Name:tz Default Value:TIMESTAMPLOCALTZ'2016-01-03 12:26:34.0 America/Los_Angeles' + +PREHOOK: query: EXPLAIN INSERT INTO table1(t) values ("1985-12-31 12:45:07") +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO table1(t) values ("1985-12-31 12:45:07") +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('1985-12-31 12:45:07')) (type: array<struct<col1:string>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: DATE'2018-02-14' (type: date), CAST( col1 AS TIMESTAMP) (type: timestamp), TIMESTAMPLOCALTZ'2016-01-03 12:26:34.0 US/Pacific' (type: timestamp with local time zone), CURRENT_DATE() (type: date), null (type: timestamp) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.table1 + Execution mode: llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.table1 + +PREHOOK: query: INSERT INTO table1(t) values ("1985-12-31 12:45:07") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table1 +POSTHOOK: query: INSERT INTO table1(t) values ("1985-12-31 12:45:07") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table1 +POSTHOOK: Lineage: table1.d SIMPLE [] +POSTHOOK: Lineage: table1.d1 EXPRESSION [] +POSTHOOK: Lineage: table1.t SCRIPT [] +POSTHOOK: Lineage: table1.t1 SIMPLE [] +POSTHOOK: Lineage: table1.tz SIMPLE [] +PREHOOK: query: SELECT d, t, tz,d1=current_date(), t1 from table1 +PREHOOK: type: QUERY +PREHOOK: Input: default@table1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT d, t, tz,d1=current_date(), t1 from table1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table1 +#### A masked pattern was
here #### +2018-02-14 1985-12-31 12:45:07 2016-01-03 12:26:34.0 US/Pacific true NULL +PREHOOK: query: EXPLAIN INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('1985-12-31','2018-02-27 17:32:14.259')) (type: array<struct<col1:string,col2:string>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: CAST( col1 AS DATE) (type: date), TIMESTAMP'2016-02-22 12:45:07.0' (type: timestamp), TIMESTAMPLOCALTZ'2016-01-03 12:26:34.0 US/Pacific' (type: timestamp with local time zone), CURRENT_DATE() (type: date), CAST( col2 AS TIMESTAMP) (type: timestamp) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.table1 + Execution mode: llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.table1 + +PREHOOK: query: INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table1 +POSTHOOK: query: INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table1 +POSTHOOK: Lineage: table1.d SCRIPT [] +POSTHOOK: Lineage: table1.d1 EXPRESSION [] +POSTHOOK: Lineage: table1.t SIMPLE [] +POSTHOOK: Lineage: table1.t1 SCRIPT [] +POSTHOOK: Lineage: table1.tz SIMPLE [] +PREHOOK: query: SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1 +PREHOOK: type: QUERY +PREHOOK: Input: default@table1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table1 +#### A masked pattern was here #### +2018-02-14 1985-12-31 12:45:07 2016-01-03 12:26:34.0 US/Pacific true NULL +1985-12-31 2016-02-22 12:45:07 2016-01-03 12:26:34.0 US/Pacific true false +PREHOOK: query: DROP TABLE table1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@table1 +PREHOOK: Output: default@table1 +POSTHOOK: query: DROP TABLE table1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@table1 +POSTHOOK: Output:
default@table1 +PREHOOK: query: CREATE TABLE table2(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(), + k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT cast('varchar_default_value' as varchar(350)), + c char(20) DEFAULT cast('char_value' as char(20))) + clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table2 +POSTHOOK: query: CREATE TABLE table2(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(), + k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT cast('varchar_default_value' as varchar(350)), + c char(20) DEFAULT cast('char_value' as char(20))) + clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table2 +PREHOOK: query: DESC FORMATTED table2 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@table2 +POSTHOOK: query: DESC FORMATTED table2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@table2 +# col_name data_type comment +i string +j string +k string +v varchar(350) +c char(20) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [i] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.table2 +Constraint Name: #### A masked pattern was here #### +Column Name:j Default Value:CURRENT_USER() + +Constraint Name: #### A masked pattern was here #### +Column Name:k Default Value:'Current_User()' + +Constraint Name: #### A masked pattern was here #### +Column Name:v Default Value:CAST( 'varchar_default_value' AS varchar(350)) + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:CAST( 'char_value' AS CHAR(20)) + +Constraint Name: #### A masked pattern was here #### +Column Name:i Default Value:'current_database()' + +PREHOOK: query: EXPLAIN INSERT INTO table2(i) values('default') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO table2(i) values('default') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('default')) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: string), CURRENT_USER() (type: string), 'Current_User()' (type: string), CAST( 'varchar_default_value' AS 
varchar(350)) (type: varchar(350)), CAST( 'char_value' AS CHAR(20)) (type: char(20)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 405 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 405 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: varchar(350)), _col4 (type: char(20)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: varchar(350)), VALUE._col4 (type: char(20)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 405 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 405 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.table2 + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.table2 + Write Type: INSERT + +PREHOOK: query: INSERT INTO table2(i) values('default') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: INSERT INTO table2(i) values('default') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Lineage: table2.c EXPRESSION [] +POSTHOOK: Lineage: table2.i SCRIPT [] +POSTHOOK: Lineage: table2.j EXPRESSION [] +POSTHOOK: Lineage: table2.k SIMPLE [] +POSTHOOK: Lineage: table2.v EXPRESSION [] +PREHOOK: query: SELECT i,j=current_user(),k,v,c FROM table2 +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT i,j=current_user(),k,v,c FROM table2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +#### A masked pattern was here #### +default true Current_User() varchar_default_value char_value +PREHOOK: query: EXPLAIN INSERT INTO table2(v, c) values('varchar_default2', 'char') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO table2(v, c) values('varchar_default2', 'char') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('varchar_default2','char')) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: 
COMPLETE + function name: inline + Select Operator + expressions: 'current_database()' (type: string), CURRENT_USER() (type: string), 'Current_User()' (type: string), col1 (type: string), col2 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 298 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 298 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), CAST( VALUE._col3 AS varchar(350)) (type: varchar(350)), CAST( VALUE._col4 AS CHAR(20)) (type: char(20)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 836 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 836 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.table2 + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.table2 + Write Type: INSERT + +PREHOOK: query: INSERT INTO table2(v, c) values('varchar_default2', 'char') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: INSERT INTO table2(v, c) values('varchar_default2', 'char') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Lineage: table2.c SCRIPT [] +POSTHOOK: Lineage: table2.i SIMPLE [] +POSTHOOK: Lineage: table2.j EXPRESSION [] +POSTHOOK: Lineage: table2.k SIMPLE [] +POSTHOOK: Lineage: table2.v SCRIPT [] +PREHOOK: query: SELECT i,j=current_user(),k,v,c FROM table2 +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT i,j=current_user(),k,v,c FROM table2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +#### A masked pattern was here #### +default true Current_User() varchar_default_value char_value +current_database() true Current_User() varchar_default2 char +PREHOOK: query: DROP TABLE table2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@table2 +PREHOOK: Output: default@table2 +POSTHOOK: query: DROP TABLE table2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@table2 +POSTHOOK: Output: default@table2 +PREHOOK: query: CREATE TABLE misc(b BOOLEAN DEFAULT true, b1 BINARY DEFAULT cast('bin' as binary)) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@misc +POSTHOOK: query: CREATE TABLE misc(b BOOLEAN DEFAULT true, b1 BINARY DEFAULT cast('bin' as binary)) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: 
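The two table2 result rows above illustrate the quoting rule for defaults: an unquoted function call such as current_user() is evaluated when the row is written (hence j = current_user() compares true), while a quoted default such as 'Current_User()' or 'current_database()' is an ordinary string literal inserted verbatim. A trimmed sketch, with t as an illustrative name:

    CREATE TABLE t (u STRING DEFAULT current_user(),      -- evaluated at insert time
                    s STRING DEFAULT 'current_user()');   -- stored and inserted as a literal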
type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@misc +PREHOOK: query: DESC FORMATTED misc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@misc +POSTHOOK: query: DESC FORMATTED misc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@misc +# col_name data_type comment +b boolean +b1 binary + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.misc +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:true + +Constraint Name: #### A masked pattern was here #### +Column Name:b1 Default Value:CAST( 'bin' AS BINARY) + +PREHOOK: query: EXPLAIN INSERT INTO misc(b) values(false) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO misc(b) values(false) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(false)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: boolean), CAST( 'bin' AS BINARY) (type: binary) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: boolean) + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: boolean), _col1 (type: binary) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: boolean), VALUE._col1 (type: binary) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.misc + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.misc + Write Type: INSERT + +PREHOOK: query: INSERT INTO misc(b) values(false) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@misc +POSTHOOK: query: INSERT INTO misc(b) values(false) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@misc +POSTHOOK: Lineage: misc.b SCRIPT [] +POSTHOOK: Lineage: misc.b1 EXPRESSION [] +PREHOOK: query: SELECT b, b1 from misc +PREHOOK: type: QUERY +PREHOOK: Input: default@misc +#### A masked pattern was here #### +POSTHOOK: query: SELECT b, b1 from misc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@misc +#### A masked pattern was here #### +false bin +PREHOOK: query: EXPLAIN INSERT INTO misc(b1) values('011') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO misc(b1) values('011') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('011')) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: true (type: boolean), col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: boolean), _col1 (type: string) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: boolean), CAST( VALUE._col1 AS BINARY) (type: binary) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 148 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 148 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.misc + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.misc + Write Type: INSERT + +PREHOOK: query: INSERT INTO misc(b) values(false) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@misc +POSTHOOK: query: INSERT INTO misc(b) values(false) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@misc +POSTHOOK: Lineage: misc.b SCRIPT [] +POSTHOOK: Lineage: misc.b1 EXPRESSION [] +PREHOOK: query: 
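misc exercises BOOLEAN and BINARY defaults: an insert that omits b1 falls back to CAST('bin' AS BINARY), and when a value is supplied for b1 the reducer casts the string at runtime (CAST( VALUE._col1 AS BINARY) in the plan above). Trimmed to the two relevant columns:

    CREATE TABLE misc (b BOOLEAN DEFAULT true, b1 BINARY DEFAULT cast('bin' as binary));
    INSERT INTO misc(b) VALUES (false);   -- b1 defaults to the bytes of 'bin'
    SELECT b, b1 FROM misc;               -- false    bin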
SELECT b, b1 from misc +PREHOOK: type: QUERY +PREHOOK: Input: default@misc +#### A masked pattern was here #### +POSTHOOK: query: SELECT b, b1 from misc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@misc +#### A masked pattern was here #### +false bin +false bin +PREHOOK: query: DROP TABLE misc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@misc +PREHOOK: Output: default@misc +POSTHOOK: query: DROP TABLE misc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@misc +POSTHOOK: Output: default@misc +PREHOOK: query: CREATE table t11(i int default cast(cast(4 as double) as int), + b1 boolean default cast ('true' as boolean), b2 int default cast (5.67 as int), + b3 tinyint default cast (45 as tinyint), b4 float default cast (45.4 as float), + b5 bigint default cast (567 as bigint), b6 smallint default cast (88 as smallint), + j varchar(50) default cast(current_timestamp() as varchar(50)), + k string default cast(cast(current_user() as varchar(50)) as string), + tz1 timestamp with local time zone DEFAULT cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), + ts timestamp default cast('2016-01-01 12:01:01' as timestamp), + dc decimal(8,2) default cast(4.5 as decimal(8,2)), + c2 double default cast(5 as double), c4 char(2) default cast(cast(cast('ab' as string) as varchar(2)) as char(2))) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t11 +POSTHOOK: query: CREATE table t11(i int default cast(cast(4 as double) as int), + b1 boolean default cast ('true' as boolean), b2 int default cast (5.67 as int), + b3 tinyint default cast (45 as tinyint), b4 float default cast (45.4 as float), + b5 bigint default cast (567 as bigint), b6 smallint default cast (88 as smallint), + j varchar(50) default cast(current_timestamp() as varchar(50)), + k string default cast(cast(current_user() as varchar(50)) as string), + tz1 timestamp with local time zone DEFAULT cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time zone), + ts timestamp default cast('2016-01-01 12:01:01' as timestamp), + dc decimal(8,2) default cast(4.5 as decimal(8,2)), + c2 double default cast(5 as double), c4 char(2) default cast(cast(cast('ab' as string) as varchar(2)) as char(2))) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t11 +PREHOOK: query: DESC FORMATTED t11 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@t11 +POSTHOOK: query: DESC FORMATTED t11 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@t11 +# col_name data_type comment +i int +b1 boolean +b2 int +b3 tinyint +b4 float +b5 bigint +b6 smallint +j varchar(50) +k string +tz1 timestamp with local time zone('US/Pacific') +ts timestamp +dc decimal(8,2) +c2 double +c4 char(2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.t11 +Constraint Name: #### A masked pattern was here #### +Column Name:tz1 Default Value:CAST( '2016-01-03 12:26:34 America/Los_Angeles' AS timestamp with 
local time zone) + +Constraint Name: #### A masked pattern was here #### +Column Name:b1 Default Value:UDFToBoolean('true') + +Constraint Name: #### A masked pattern was here #### +Column Name:i Default Value:UDFToInteger(UDFToDouble(4)) + +Constraint Name: #### A masked pattern was here #### +Column Name:b2 Default Value:UDFToInteger(5.67) + +Constraint Name: #### A masked pattern was here #### +Column Name:j Default Value:CAST( CURRENT_TIMESTAMP() AS varchar(50)) + +Constraint Name: #### A masked pattern was here #### +Column Name:b3 Default Value:UDFToByte(45) + +Constraint Name: #### A masked pattern was here #### +Column Name:k Default Value:UDFToString(CAST( CURRENT_USER() AS varchar(50))) + +Constraint Name: #### A masked pattern was here #### +Column Name:b4 Default Value:UDFToFloat(45.4) + +Constraint Name: #### A masked pattern was here #### +Column Name:b5 Default Value:UDFToLong(567) + +Constraint Name: #### A masked pattern was here #### +Column Name:b6 Default Value:UDFToShort(88) + +Constraint Name: #### A masked pattern was here #### +Column Name:c2 Default Value:UDFToDouble(5) + +Constraint Name: #### A masked pattern was here #### +Column Name:c4 Default Value:CAST( CAST( 'ab' AS varchar(2)) AS CHAR(2)) + +Constraint Name: #### A masked pattern was here #### +Column Name:ts Default Value:CAST( '2016-01-01 12:01:01' AS TIMESTAMP) + +Constraint Name: #### A masked pattern was here #### +Column Name:dc Default Value:CAST( 4.5 AS decimal(8,2)) + +PREHOOK: query: EXPLAIN INSERT INTO t11(c4) values('vi') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO t11(c4) values('vi') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('vi')) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: UDFToInteger(UDFToDouble(4)) (type: int), UDFToBoolean('true') (type: boolean), UDFToInteger(5.67) (type: int), UDFToByte(45) (type: tinyint), UDFToFloat(45.4) (type: float), UDFToLong(567) (type: bigint), UDFToShort(88) (type: smallint), CAST( CURRENT_TIMESTAMP() AS varchar(50)) (type: varchar(50)), UDFToString(CAST( CURRENT_USER() AS varchar(50))) (type: string), CAST( '2016-01-03 12:26:34 America/Los_Angeles' AS timestamp with local time zone) (type: timestamp with local time zone), CAST( '2016-01-01 12:01:01' AS TIMESTAMP) (type: timestamp), CAST( 4.5 AS decimal(8,2)) (type: decimal(8,2)), UDFToDouble(5) (type: double), CAST( col1 AS CHAR(2)) (type: char(2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 + Statistics: Num rows: 1 Data size: 522 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 522 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
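The t11 constraint listing above shows how CAST-based defaults are normalized when stored: a cast to a plain primitive type is recorded as the corresponding internal conversion UDF (cast(45 as tinyint) becomes UDFToByte(45), cast('true' as boolean) becomes UDFToBoolean('true'), cast(5 as double) becomes UDFToDouble(5)), while casts to parameterized types keep CAST syntax, e.g. CAST( 4.5 AS decimal(8,2)) and CAST( CURRENT_TIMESTAMP() AS varchar(50)). A minimal sketch with illustrative table names t and u:

    CREATE TABLE t (x TINYINT DEFAULT CAST(45 AS TINYINT));            -- stored as UDFToByte(45)
    CREATE TABLE u (d DECIMAL(8,2) DEFAULT CAST(4.5 AS DECIMAL(8,2))); -- stored as CAST( 4.5 AS decimal(8,2))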
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.t11 + Execution mode: llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.t11 + +PREHOOK: query: INSERT INTO t11(c4) values('vi') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t11 +POSTHOOK: query: INSERT INTO t11(c4) values('vi') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t11 +POSTHOOK: Lineage: t11.b1 EXPRESSION [] +POSTHOOK: Lineage: t11.b2 EXPRESSION [] +POSTHOOK: Lineage: t11.b3 EXPRESSION [] +POSTHOOK: Lineage: t11.b4 EXPRESSION [] +POSTHOOK: Lineage: t11.b5 EXPRESSION [] +POSTHOOK: Lineage: t11.b6 EXPRESSION [] +POSTHOOK: Lineage: t11.c2 EXPRESSION [] +POSTHOOK: Lineage: t11.c4 SCRIPT [] +POSTHOOK: Lineage: t11.dc EXPRESSION [] +POSTHOOK: Lineage: t11.i EXPRESSION [] +POSTHOOK: Lineage: t11.j EXPRESSION [] +POSTHOOK: Lineage: t11.k EXPRESSION [] +POSTHOOK: Lineage: t11.ts EXPRESSION [] +POSTHOOK: Lineage: t11.tz1 EXPRESSION [] +PREHOOK: query: SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11 +PREHOOK: type: QUERY +PREHOOK: Input: default@t11 +#### A masked pattern was here #### +POSTHOOK: query: SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t11 +#### A masked pattern was here #### +2016-01-01 12:01:01 2016-01-03 12:26:34.0 US/Pacific 4.50 true 5 45 45.4 567 88 false true 5.0 vi +PREHOOK: query: EXPLAIN INSERT INTO t11(b1,c4) values(true,'ga') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO t11(b1,c4) values(true,'ga') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(true,'ga')) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: UDFToInteger(UDFToDouble(4)) (type: int), col1 (type: boolean), UDFToInteger(5.67) (type: int), UDFToByte(45) (type: tinyint), UDFToFloat(45.4) (type: float), UDFToLong(567) (type: bigint), UDFToShort(88) (type: smallint), CAST( CURRENT_TIMESTAMP() AS varchar(50)) (type: varchar(50)), UDFToString(CAST( CURRENT_USER() AS varchar(50))) (type: string), CAST( '2016-01-03 12:26:34 America/Los_Angeles' AS timestamp with local time zone) (type: timestamp with local time zone), CAST( '2016-01-01 12:01:01' AS TIMESTAMP) (type: timestamp), CAST( 4.5 AS decimal(8,2)) (type: decimal(8,2)), UDFToDouble(5) (type: double), CAST( col2 AS CHAR(2)) (type: char(2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, 
_col11, _col12, _col13 + Statistics: Num rows: 1 Data size: 519 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 519 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.t11 + Execution mode: llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.t11 + +PREHOOK: query: INSERT INTO t11(c4) values('vi') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t11 +POSTHOOK: query: INSERT INTO t11(c4) values('vi') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t11 +POSTHOOK: Lineage: t11.b1 EXPRESSION [] +POSTHOOK: Lineage: t11.b2 EXPRESSION [] +POSTHOOK: Lineage: t11.b3 EXPRESSION [] +POSTHOOK: Lineage: t11.b4 EXPRESSION [] +POSTHOOK: Lineage: t11.b5 EXPRESSION [] +POSTHOOK: Lineage: t11.b6 EXPRESSION [] +POSTHOOK: Lineage: t11.c2 EXPRESSION [] +POSTHOOK: Lineage: t11.c4 SCRIPT [] +POSTHOOK: Lineage: t11.dc EXPRESSION [] +POSTHOOK: Lineage: t11.i EXPRESSION [] +POSTHOOK: Lineage: t11.j EXPRESSION [] +POSTHOOK: Lineage: t11.k EXPRESSION [] +POSTHOOK: Lineage: t11.ts EXPRESSION [] +POSTHOOK: Lineage: t11.tz1 EXPRESSION [] +PREHOOK: query: SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11 +PREHOOK: type: QUERY +PREHOOK: Input: default@t11 +#### A masked pattern was here #### +POSTHOOK: query: SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t11 +#### A masked pattern was here #### +2016-01-01 12:01:01 2016-01-03 12:26:34.0 US/Pacific 4.50 true 5 45 45.4 567 88 false true 5.0 vi +2016-01-01 12:01:01 2016-01-03 12:26:34.0 US/Pacific 4.50 true 5 45 45.4 567 88 false true 5.0 vi +PREHOOK: query: DROP TABLE t11 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t11 +PREHOOK: Output: default@t11 +POSTHOOK: query: DROP TABLE t11 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t11 +POSTHOOK: Output: default@t11 +PREHOOK: query: CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@numericDataType +POSTHOOK: query: CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@numericDataType +PREHOOK: query: ALTER TABLE 
numericDataType DROP CONSTRAINT tinyint_constraint +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT +POSTHOOK: query: ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(456) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(456) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(456)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: null (type: tinyint), col1 (type: int), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), 1234567.89 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: UDFToShort(_col1) (type: smallint) + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: double), _col5 (type: decimal(9,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), UDFToShort(VALUE._col1) (type: 
smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(b) values(456) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(b) values(456) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SCRIPT [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SIMPLE [] +PREHOOK: query: SELECT * from numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +PREHOOK: query: ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE +PREHOOK: type: ALTERTABLE_ADDCONSTRAINT +POSTHOOK: query: ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE +POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + numFiles 1 + totalSize 1035 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A 
masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(56) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(56) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(56)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: null (type: tinyint), col1 (type: int), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), 1234567.89 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: UDFToShort(_col1) (type: smallint) + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: double), _col5 (type: decimal(9,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), UDFToShort(VALUE._col1) (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(b) values(456) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(b) values(456) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: 
Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SCRIPT [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SIMPLE [] +PREHOOK: query: SELECT * from numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +PREHOOK: query: ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@numericdatatype +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@numericdatatype +POSTHOOK: Output: default@numericdatatype +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + numFiles 2 + totalSize 2071 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Not Null Constraints +Table: default.numericdatatype +Constraint Name: second_null_constraint +Column Name: a + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +PREHOOK: query: ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@numericdatatype +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@numericdatatype +POSTHOOK: Output: default@numericdatatype +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: 
default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + numFiles 2 + totalSize 2071 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Not Null Constraints +Table: default.numericdatatype +Constraint Name: second_null_constraint +Column Name: a + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +Constraint Name: default_constraint +Column Name:a Default Value:127Y + +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(f) values(847.45) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(f) values(847.45) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(847.45)) (type: array>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: 127Y (type: tinyint), 32767S (type: smallint), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), col1 (type: decimal(5,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: enforce_constraint(127Y is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col1 (type: smallint) + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: 
bigint), _col4 (type: double), _col5 (type: decimal(5,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), CAST( VALUE._col5 AS decimal(9,2)) (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: --plan should have both DEFAULT and NOT NULL +INSERT INTO numericDataType(f) values(847.45) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: --plan should have both DEFAULT and NOT NULL +INSERT INTO numericDataType(f) values(847.45) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SIMPLE [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SCRIPT [] +PREHOOK: query: Select * from numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: Select * from numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +127 32767 2147483647 9223372036854775807 3.4E38 847.45 +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + numFiles 3 + totalSize 3106 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: 
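This is the plan the inline comment asks for: with both DEFAULT 127Y and NOT NULL enabled on column a, an INSERT that omits a first substitutes the default (127Y in the Select Operator) and then re-checks it through a Filter Operator whose predicate is enforce_constraint(127Y is not null). The sequence, taken from the statements above:

    ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE;
    ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE;
    EXPLAIN INSERT INTO numericDataType(f) VALUES (847.45);
    -- Select Operator substitutes 127Y; Filter Operator checks enforce_constraint(127Y is not null)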
uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Not Null Constraints +Table: default.numericdatatype +Constraint Name: second_null_constraint +Column Name: a + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +Constraint Name: default_constraint +Column Name:a Default Value:127Y + +PREHOOK: query: ALTER TABLE numericDataType DROP CONSTRAINT default_constraint +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT +POSTHOOK: query: ALTER TABLE numericDataType DROP CONSTRAINT default_constraint +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + numFiles 3 + totalSize 3106 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Not Null Constraints +Table: default.numericdatatype +Constraint Name: second_null_constraint +Column Name: a + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +PREHOOK: query: ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@numericdatatype +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@numericdatatype +POSTHOOK: Output: default@numericdatatype +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: 
default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + numFiles 3 + totalSize 3106 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Not Null Constraints +Table: default.numericdatatype +Constraint Name: second_null_constraint +Column Name: a + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +Constraint Name: default_constraint +Column Name:a Default Value:108Y + +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(f) values(847.45) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(f) values(847.45) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(847.45)) (type: array<struct<col1:decimal(5,2)>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: 108Y (type: tinyint), 32767S (type: smallint), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), col1 (type: decimal(5,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: enforce_constraint(108Y is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col1 (type: smallint) + Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: double), _col5 (type: decimal(5,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 +
Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), CAST( VALUE._col5 AS decimal(9,2)) (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(f) values(847.45) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(f) values(847.45) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SIMPLE [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SCRIPT [] +PREHOOK: query: Select * from numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: Select * from numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +127 32767 2147483647 9223372036854775807 3.4E38 847.45 +108 32767 2147483647 9223372036854775807 3.4E38 847.45 +PREHOOK: query: DROP TABLE numericDataType +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@numericdatatype +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: DROP TABLE numericDataType +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@numericdatatype +POSTHOOK: Output: default@numericdatatype +PREHOOK: query: create table t (i int, j string default + '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t +POSTHOOK: query: create table t (i int, j string default + '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t +PREHOOK: query: desc formatted t +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@t +POSTHOOK: query: desc 
formatted t +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@t +# col_name data_type comment +i int +j string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.t +Constraint Name: #### A masked pattern was here #### +Column Name:j Default Value:'1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123' + +PREHOOK: query: explain insert into t(i) values(3) +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into t(i) values(3) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(3)) (type: array<struct<col1:int>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: int), '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123' (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 337 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 337 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.t + Execution mode: llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.t + +PREHOOK: query: insert into t(i) values(3) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t +POSTHOOK: query: insert into t(i) values(3) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t +POSTHOOK: Lineage: t.i SCRIPT [] +POSTHOOK: Lineage: t.j SIMPLE [] +PREHOOK: query: select * from t +PREHOOK: type: QUERY +PREHOOK: Input: default@t +#### A masked pattern was here ####
+POSTHOOK: query: select * from t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t +#### A masked pattern was here #### +3 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +PREHOOK: query: drop table t +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t +PREHOOK: Output: default@t +POSTHOOK: query: drop table t +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@t +POSTHOOK: Output: default@t +PREHOOK: query: CREATE TABLE tablePartitioned (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost', + c STRING NOT NULL ENFORCED) + PARTITIONED BY (p1 STRING, p2 INT) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tablePartitioned +POSTHOOK: query: CREATE TABLE tablePartitioned (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost', + c STRING NOT NULL ENFORCED) + PARTITIONED BY (p1 STRING, p2 INT) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tablePartitioned +PREHOOK: query: explain INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint') +PREHOOK: type: QUERY +POSTHOOK: query: explain INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct('not','null','constraint')) (type: array<struct<col1:string,col2:string,col3:string>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: col1 (type: string), col2 (type: string), col3 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (enforce_constraint(_col0 is not null) and enforce_constraint(_col2 is not null)) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tablepartitioned + Execution mode: llap + LLAP IO: no inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + p1 today + p2 10 + replace: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tablepartitioned + +PREHOOK: query: INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint') +PREHOOK: type:
QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tablepartitioned@p1=today/p2=10 +POSTHOOK: query: INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tablepartitioned@p1=today/p2=10 +POSTHOOK: Lineage: tablepartitioned PARTITION(p1=today,p2=10).a SCRIPT [] +POSTHOOK: Lineage: tablepartitioned PARTITION(p1=today,p2=10).c SCRIPT [] +POSTHOOK: Lineage: tablepartitioned PARTITION(p1=today,p2=10).url SCRIPT [] +PREHOOK: query: DROP TABLE tablePartitioned +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@tablepartitioned +PREHOOK: Output: default@tablepartitioned +POSTHOOK: query: DROP TABLE tablePartitioned +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@tablepartitioned +POSTHOOK: Output: default@tablepartitioned +PREHOOK: query: CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@numericDataType +POSTHOOK: query: CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647, + d BIGINT DEFAULT 9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89) + clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@numericDataType +PREHOOK: query: ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint +PREHOOK: type: ALTERTABLE_DROPCONSTRAINT +POSTHOOK: query: ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint +POSTHOOK: type: ALTERTABLE_DROPCONSTRAINT +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +PREHOOK: 
query: EXPLAIN INSERT INTO numericDataType(b) values(456) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(456) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(456)) (type: array<struct<col1:int>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: null (type: tinyint), col1 (type: int), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), 1234567.89 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce partition columns: UDFToShort(_col1) (type: smallint) + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: double), _col5 (type: decimal(9,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), UDFToShort(VALUE._col1) (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(b) values(456) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(b) values(456) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SCRIPT [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SIMPLE [] +PREHOOK: query: SELECT * from numericDataType +PREHOOK: type: QUERY +PREHOOK: Input:
default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +PREHOOK: query: ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE +PREHOOK: type: ALTERTABLE_ADDCONSTRAINT +POSTHOOK: query: ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE +POSTHOOK: type: ALTERTABLE_ADDCONSTRAINT +PREHOOK: query: DESC FORMATTED numericDataType +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@numericdatatype +POSTHOOK: query: DESC FORMATTED numericDataType +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@numericdatatype +# col_name data_type comment +a tinyint +b smallint +c int +d bigint +e double +f decimal(9,2) + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + numFiles 1 + totalSize 1035 + transactional true + transactional_properties default +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: 2 +Bucket Columns: [b] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 + +# Constraints + +# Unique Constraints +Table: default.numericdatatype +Constraint Name: uk1 +Column Name:a Key Sequence:1 +Column Name:b Key Sequence:2 + + +# Default Constraints +Table: default.numericdatatype +Constraint Name: #### A masked pattern was here #### +Column Name:b Default Value:32767S + +Constraint Name: #### A masked pattern was here #### +Column Name:c Default Value:2147483647 + +Constraint Name: #### A masked pattern was here #### +Column Name:d Default Value:9223372036854775807L + +Constraint Name: #### A masked pattern was here #### +Column Name:e Default Value:3.4E38 + +Constraint Name: #### A masked pattern was here #### +Column Name:f Default Value:1234567.89 + +PREHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(56) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN INSERT INTO numericDataType(b) values(56) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: array(const struct(56)) (type: array<struct<col1:int>>) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + UDTF Operator + Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + function name: inline + Select Operator + expressions: null (type: tinyint), col1 (type: int), 2147483647 (type: int), 9223372036854775807L (type: bigint), 3.4E38 (type: double), 1234567.89 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Map-reduce
partition columns: UDFToShort(_col1) (type: smallint) + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: double), _col5 (type: decimal(9,2)) + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), UDFToShort(VALUE._col1) (type: smallint), VALUE._col2 (type: int), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: decimal(9,2)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 140 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.numericdatatype + Write Type: INSERT + +PREHOOK: query: INSERT INTO numericDataType(b) values(456) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: INSERT INTO numericDataType(b) values(456) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@numericdatatype +POSTHOOK: Lineage: numericdatatype.a SIMPLE [] +POSTHOOK: Lineage: numericdatatype.b SCRIPT [] +POSTHOOK: Lineage: numericdatatype.c SIMPLE [] +POSTHOOK: Lineage: numericdatatype.d SIMPLE [] +POSTHOOK: Lineage: numericdatatype.e SIMPLE [] +POSTHOOK: Lineage: numericdatatype.f SIMPLE [] +PREHOOK: query: SELECT * from numericDataType +PREHOOK: type: QUERY +PREHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +POSTHOOK: query: SELECT * from numericDataType +POSTHOOK: type: QUERY +POSTHOOK: Input: default@numericdatatype +#### A masked pattern was here #### +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +NULL 456 2147483647 9223372036854775807 3.4E38 1234567.89 +PREHOOK: query: DROP TABLE numericDataType +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@numericdatatype +PREHOOK: Output: default@numericdatatype +POSTHOOK: query: DROP TABLE numericDataType +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@numericdatatype +POSTHOOK: Output: default@numericdatatype diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out index 025c484103..f468b90c64 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out @@ -774,10 +774,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: 
((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -947,10 +947,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -1091,10 +1091,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1235,10 +1235,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1612,22 +1612,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 
Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -1651,7 +1651,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -1731,9 +1731,9 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: (UDFToDouble(_col0) * 2.0) (type: double) + key expressions: (UDFToDouble(_col0) * 2.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double) Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs @@ -1741,10 +1741,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -1767,7 +1767,7 @@ STAGE PLANS: Dynamic Partitioning Event Operator Target column: hr (string) Target Input: srcpart - Partition key expr: (UDFToDouble(hr) * 2.0) + Partition key expr: (UDFToDouble(hr) * 2.0D) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Target Vertex: Map 1 Execution mode: llap @@ -1779,7 +1779,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -1870,19 +1870,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort 
order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Execution mode: llap LLAP IO: no inputs @@ -1894,7 +1894,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -1974,9 +1974,9 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: (UDFToDouble(_col0) * 2.0) (type: double) + key expressions: (UDFToDouble(_col0) * 2.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double) Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs @@ -1984,10 +1984,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2007,7 +2007,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -2101,9 +2101,9 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + key expressions: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) sort order: + - Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: no inputs @@ -2111,10 +2111,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2137,7 +2137,7 @@ STAGE PLANS: Dynamic Partitioning Event Operator Target column: hr (string) Target Input: srcpart - 
Partition key expr: UDFToString((UDFToDouble(hr) * 2.0)) + Partition key expr: UDFToString((UDFToDouble(hr) * 2.0D)) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Target Vertex: Map 1 Execution mode: llap @@ -2149,7 +2149,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + 0 UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) 1 UDFToString(_col0) (type: string) Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -2375,10 +2375,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D)) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08')) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08')) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -2492,10 +2492,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -3002,10 +3002,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -3133,10 +3133,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 13.0) (type: boolean) + predicate: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -3153,10 +3153,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: 
COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ds (type: string), hr (type: string) @@ -4310,10 +4310,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -4447,10 +4447,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -4700,7 +4700,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) input vertices: 1 Map 3 Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE @@ -4719,22 +4719,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column 
stats: NONE Group By Operator @@ -4822,7 +4822,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) input vertices: 1 Map 3 @@ -4842,10 +4842,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -4868,7 +4868,7 @@ STAGE PLANS: Dynamic Partitioning Event Operator Target column: hr (string) Target Input: srcpart - Partition key expr: (UDFToDouble(hr) * 2.0) + Partition key expr: (UDFToDouble(hr) * 2.0D) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE Target Vertex: Map 1 Execution mode: llap @@ -5427,10 +5427,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -5523,10 +5523,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 13.0) (type: boolean) + predicate: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -5553,10 +5553,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ds (type: string), hr (type: string) @@ -5930,10 +5930,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((date = 
'2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out index 8bce445038..563a343a3f 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_2.q.out @@ -582,10 +582,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: c - filterExpr: ((UDFToDouble(key) < 0.0) and value is not null and (value BETWEEN DynamicValue(RS_21_x__col1_min) AND DynamicValue(RS_21_x__col1_max) and in_bloom_filter(value, DynamicValue(RS_21_x__col1_bloom_filter)))) (type: boolean) + filterExpr: ((UDFToDouble(key) < 0.0D) and value is not null and (value BETWEEN DynamicValue(RS_21_x__col1_min) AND DynamicValue(RS_21_x__col1_max) and in_bloom_filter(value, DynamicValue(RS_21_x__col1_bloom_filter)))) (type: boolean) Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(key) < 0.0) and (value BETWEEN DynamicValue(RS_21_x__col1_min) AND DynamicValue(RS_21_x__col1_max) and in_bloom_filter(value, DynamicValue(RS_21_x__col1_bloom_filter))) and value is not null) (type: boolean) + predicate: ((UDFToDouble(key) < 0.0D) and (value BETWEEN DynamicValue(RS_21_x__col1_min) AND DynamicValue(RS_21_x__col1_max) and in_bloom_filter(value, DynamicValue(RS_21_x__col1_bloom_filter))) and value is not null) (type: boolean) Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: value (type: string) diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out index aca6b21dba..4a8192d044 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_3.q.out @@ -202,7 +202,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cardinality_violation(_col0) (type: int) @@ -770,7 +770,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cardinality_violation(_col0) (type: int) diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out index 15a97a78c1..be8747a426 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out 
@@ -175,7 +175,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -255,7 +255,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -352,7 +352,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -430,7 +430,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -576,7 +576,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -656,7 +656,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -753,7 +753,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) @@ -831,7 +831,7 @@ STAGE PLANS: alias: over1k_orc Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((t = 27) or t is null) (type: boolean) + predicate: ((t = 27Y) or t is null) (type: boolean) Statistics: Num rows: 11 Data 
size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -1364,7 +1364,7 @@ STAGE PLANS:
alias: over1k_orc
Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -1477,7 +1477,7 @@ STAGE PLANS:
alias: over1k_orc
Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -1579,7 +1579,7 @@ STAGE PLANS:
Number of rows: 10
Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((_col0 = 27) or _col0 is null) (type: boolean)
+ predicate: ((_col0 = 27Y) or _col0 is null) (type: boolean)
Statistics: Num rows: 5 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
@@ -1657,7 +1657,7 @@ STAGE PLANS:
alias: over1k_orc
Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
@@ -1776,7 +1776,7 @@ STAGE PLANS:
alias: over1k_orc
Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
@@ -2165,7 +2165,7 @@ STAGE PLANS:
alias: over1k_orc
Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -2278,7 +2278,7 @@ STAGE PLANS:
alias: over1k_orc
Statistics: Num rows: 1049 Data size: 25160 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 11 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
index 78b9a443b8..5adce3291a 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
@@ -132,7 +132,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -212,7 +212,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -309,7 +309,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -387,7 +387,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -533,7 +533,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -613,7 +613,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -710,7 +710,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -788,7 +788,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -1321,7 +1321,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -1434,7 +1434,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -1536,7 +1536,7 @@ STAGE PLANS:
Number of rows: 10
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((_col0 = 27) or _col0 is null) (type: boolean)
+ predicate: ((_col0 = 27Y) or _col0 is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
@@ -1614,7 +1614,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Group By Operator
keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
@@ -1733,7 +1733,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Group By Operator
keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
@@ -2122,7 +2122,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -2235,7 +2235,7 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((t = 27) or t is null) (type: boolean)
+ predicate: ((t = 27Y) or t is null) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
@@ -2687,10 +2687,10 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: (t = 27) (type: boolean)
+ predicate: (t = 27Y) (type: boolean)
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27 (type: tinyint), i (type: int)
+ expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27Y (type: tinyint), i (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -2847,10 +2847,10 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((i = 100) and (t = 27)) (type: boolean)
+ predicate: ((i = 100) and (t = 27Y)) (type: boolean)
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27 (type: tinyint), 100 (type: int)
+ expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27Y (type: tinyint), 100 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -3007,10 +3007,10 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((s = 'foo') and (t = 27)) (type: boolean)
+ predicate: ((s = 'foo') and (t = 27Y)) (type: boolean)
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), i (type: int)
+ expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27Y (type: tinyint), i (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
@@ -3087,10 +3087,10 @@ STAGE PLANS:
alias: over1k
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: ((i = 100) and (s = 'foo') and (t = 27)) (type: boolean)
+ predicate: ((i = 100) and (s = 'foo') and (t = 27Y)) (type: boolean)
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), 100 (type: int)
+ expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27Y (type: tinyint), 100 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: NONE
File Output Operator
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index 7b1dd40a16..57f0cf6d85 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -4520,7 +4520,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: (_col1 > 1) (type: boolean)
+ predicate: (_col1 > 1L) (type: boolean)
Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cardinality_violation(_col0) (type: int)
@@ -4805,7 +4805,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: (_col1 > 1) (type: boolean)
+ predicate: (_col1 > 1L) (type: boolean)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cardinality_violation(_col0) (type: int)
@@ -5076,7 +5076,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: (_col1 > 1) (type: boolean)
+ predicate: (_col1 > 1L) (type: boolean)
Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cardinality_violation(_col0) (type: int)
@@ -5333,7 +5333,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Filter Operator
- predicate: (_col1 > 1) (type: boolean)
+ predicate: (_col1 > 1L) (type: boolean)
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: cardinality_violation(_col0) (type: int)
diff --git a/ql/src/test/results/clientpositive/llap/except_distinct.q.out b/ql/src/test/results/clientpositive/llap/except_distinct.q.out
index 1e5937a22d..cc35b120fc 100644
--- a/ql/src/test/results/clientpositive/llap/except_distinct.q.out
+++ b/ql/src/test/results/clientpositive/llap/except_distinct.q.out
@@ -275,7 +275,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -304,7 +304,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean)
+ predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean)
Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
@@ -327,7 +327,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -453,7 +453,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -482,7 +482,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -511,7 +511,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -540,7 +540,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean)
+ predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean)
Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
@@ -553,7 +553,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 41 Data size: 7626 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -582,7 +582,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 145 Data size: 28130 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean)
+ predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean)
Statistics: Num rows: 24 Data size: 4656 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
@@ -595,7 +595,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 24 Data size: 4464 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 24 Data size: 4656 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -624,7 +624,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 137 Data size: 26578 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean)
+ predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean)
Statistics: Num rows: 22 Data size: 4268 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
@@ -647,7 +647,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+ expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -762,7 +762,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), 2 (type: bigint), _col1 (type: bigint)
+ expressions: _col0 (type: int), 2L (type: bigint), _col1 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -791,7 +791,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (((_col1 * 2) = _col2) and (_col1 > 0)) (type: boolean)
+ predicate: (((_col1 * 2) = _col2) and (_col1 > 0L)) (type: boolean)
Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int)
@@ -819,7 +819,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), 1 (type: bigint), _col1 (type: bigint)
+ expressions: _col0 (type: int), 1L (type: bigint), _col1 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index 45b2b4f832..98743eb9db 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -477,7 +477,7 @@ Stage-0
Select Operator [SEL_23] (rows=1 width=20)
Output:["_col1","_col4"]
Merge Join Operator [MERGEJOIN_41] (rows=1 width=20)
- Conds:RS_19._col0=RS_20._col0(Inner),RS_20._col0=RS_21._col0(Inner),Output:["_col1","_col3","_col4","_col6"],residual filter predicates:{((_col1 >= 1) or (_col4 >= 1))} {((UDFToLong(_col1) + _col4) >= 0)} {((_col3 + _col6) >= 0)}
+ Conds:RS_19._col0=RS_20._col0(Inner),RS_20._col0=RS_21._col0(Inner),Output:["_col1","_col3","_col4","_col6"],residual filter predicates:{((_col1 >= 1) or (_col4 >= 1L))} {((UDFToLong(_col1) + _col4) >= 0)} {((_col3 + _col6) >= 0)}
<-Map 1 [SIMPLE_EDGE] llap
SHUFFLE [RS_19]
PartitionCols:_col0
@@ -624,7 +624,7 @@ Stage-0
Select Operator [SEL_23] (rows=1 width=20)
Output:["_col1","_col4"]
Merge Join Operator [MERGEJOIN_40] (rows=1 width=20)
- Conds:RS_19._col0=RS_20._col0(Inner),RS_20._col0=RS_21._col0(Inner),Output:["_col1","_col3","_col4","_col6"],residual filter predicates:{((_col1 >= 1) or (_col4 >= 1))} {((UDFToLong(_col1) + _col4) >= 0)} {((_col3 + _col6) >= 0)}
+ Conds:RS_19._col0=RS_20._col0(Inner),RS_20._col0=RS_21._col0(Inner),Output:["_col1","_col3","_col4","_col6"],residual filter predicates:{((_col1 >= 1) or (_col4 >= 1L))} {((UDFToLong(_col1) + _col4) >= 0)} {((_col3 + _col6) >= 0)}
<-Map 1 [SIMPLE_EDGE] llap
SHUFFLE [RS_19]
PartitionCols:_col0
@@ -929,7 +929,7 @@ Stage-0
Select Operator [SEL_2] (rows=6 width=85)
Output:["_col0"]
Filter Operator [FIL_13] (rows=6 width=85)
- predicate:(UDFToDouble(key) >= 1.0)
+ predicate:(UDFToDouble(key) >= 1.0D)
TableScan [TS_0] (rows=20 width=80)
default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
<-Map 3 [SIMPLE_EDGE] llap
@@ -938,7 +938,7 @@ Stage-0
Select Operator [SEL_5] (rows=6 width=85)
Output:["_col0"]
Filter Operator [FIL_14] (rows=6 width=85)
- predicate:(UDFToDouble(key) >= 1.0)
+ predicate:(UDFToDouble(key) >= 1.0D)
TableScan [TS_3] (rows=20 width=80)
default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
@@ -1565,7 +1565,7 @@ Stage-0
Select Operator [SEL_19] (rows=6 width=85)
Output:["_col0"]
Filter Operator [FIL_44] (rows=6 width=85)
- predicate:(UDFToDouble(key) > 0.0)
+ predicate:(UDFToDouble(key) > 0.0D)
TableScan [TS_17] (rows=20 width=80)
default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
<-Reducer 2 [SIMPLE_EDGE] llap
@@ -1574,7 +1574,7 @@ Stage-0
Select Operator [SEL_8] (rows=1 width=93)
Output:["_col0","_col1"]
Filter Operator [FIL_7] (rows=1 width=101)
- predicate:(((UDFToDouble(_col2) + UDFToDouble(_col3)) >= 0.0) and ((UDFToDouble(_col2) >= 1.0) or (_col3 >= 1)))
+ predicate:(((UDFToDouble(_col2) + UDFToDouble(_col3)) >= 0.0D) and ((UDFToDouble(_col2) >= 1.0D) or (_col3 >= 1L)))
Select Operator [SEL_6] (rows=1 width=101)
Output:["_col2","_col3"]
Group By Operator [GBY_5] (rows=1 width=101)
@@ -1585,7 +1585,7 @@ Stage-0
Group By Operator [GBY_3] (rows=1 width=101)
Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
Filter Operator [FIL_42] (rows=1 width=93)
- predicate:((((c_int + 1) + 1) >= 0) and (((c_int + 1) > 0) or (UDFToDouble(key) >= 0.0)) and ((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0) and (c_float > 0))
+ predicate:((((c_int + 1) + 1) >= 0) and (((c_int + 1) > 0) or (UDFToDouble(key) >= 0.0D)) and ((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0D) and (c_float > 0))
TableScan [TS_0] (rows=20 width=88)
default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
<-Reducer 7 [SIMPLE_EDGE] llap
@@ -1603,7 +1603,7 @@ Stage-0
Group By Operator [GBY_12] (rows=1 width=93)
Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
Filter Operator [FIL_43] (rows=1 width=93)
- predicate:(((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0) and (c_float > 0))
+ predicate:(((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0D) and (c_float > 0))
TableScan [TS_9] (rows=20 width=88)
default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
@@ -2218,7 +2218,7 @@ Stage-0
Select Operator [SEL_23] (rows=500 width=178)
Output:["_col0","_col1"]
Filter Operator [FIL_22] (rows=500 width=195)
- predicate:((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2)))
+ predicate:((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2)))
Merge Join Operator [MERGEJOIN_31] (rows=500 width=195)
Conds:RS_19._col0=RS_20._col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5"]
<-Reducer 2 [SIMPLE_EDGE] llap
@@ -2291,7 +2291,7 @@ Stage-0
Select Operator [SEL_25] (rows=13 width=223)
Output:["_col0","_col1","_col2"]
Filter Operator [FIL_24] (rows=13 width=231)
- predicate:(not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END)
+ predicate:(not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END)
Merge Join Operator [MERGEJOIN_32] (rows=26 width=230)
Conds:RS_21._col0, _col1=RS_22._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col8"]
<-Reducer 2 [SIMPLE_EDGE] llap
@@ -2378,7 +2378,7 @@ Stage-0
Select Operator [SEL_28] (rows=26 width=125)
Output:["_col0","_col1"]
Filter Operator [FIL_27] (rows=26 width=141)
- predicate:((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2)))
+ predicate:((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2)))
Merge Join Operator [MERGEJOIN_37] (rows=26 width=141)
Conds:RS_24.UDFToDouble(_col1)=RS_25._col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5"]
<-Reducer 6 [SIMPLE_EDGE] llap
@@ -2457,7 +2457,7 @@ Stage-0
Select Operator [SEL_34] (rows=3 width=106)
Output:["_col0","_col1"]
Filter Operator [FIL_33] (rows=3 width=119)
- predicate:(not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END)
+ predicate:(not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END)
Merge Join Operator [MERGEJOIN_44] (rows=5 width=114)
Conds:RS_30._col0, _col1=RS_31._col0, _col1(Left Outer),Output:["_col0","_col1","_col3","_col4","_col7"]
<-Reducer 3 [SIMPLE_EDGE] llap
@@ -2492,7 +2492,7 @@ Stage-0
Select Operator [SEL_12] (rows=1 width=114)
Output:["_col0","_col1"]
Filter Operator [FIL_40] (rows=1 width=114)
- predicate:(((_col2 - _col1) > 600.0) and _col1 is not null)
+ predicate:(((_col2 - _col1) > 600.0D) and _col1 is not null)
Group By Operator [GBY_10] (rows=5 width=114)
Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)"],keys:KEY._col0
<-Map 1 [SIMPLE_EDGE] llap
@@ -2511,7 +2511,7 @@ Stage-0
Select Operator [SEL_24] (rows=1 width=110)
Output:["_col0","_col1"]
Filter Operator [FIL_41] (rows=1 width=114)
- predicate:(((_col2 - _col1) > 600.0) and _col1 is not null)
+ predicate:(((_col2 - _col1) > 600.0D) and _col1 is not null)
Group By Operator [GBY_22] (rows=5 width=114)
Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)"],keys:KEY._col0
<-Map 1 [SIMPLE_EDGE] llap
@@ -5518,7 +5518,7 @@ Stage-0
Select Operator [SEL_9] (rows=550 width=87)
Output:["_col0","_col1","_col2"]
Map Join Operator [MAPJOIN_21] (rows=550 width=87)
- Conds:RS_6.UDFToDouble(_col0)=SEL_5.(UDFToDouble(_col0) + 1.0)(Inner),Output:["_col0","_col1","_col2"]
+ Conds:RS_6.UDFToDouble(_col0)=SEL_5.(UDFToDouble(_col0) + 1.0D)(Inner),Output:["_col0","_col1","_col2"]
<-Map 1 [BROADCAST_EDGE] llap
BROADCAST [RS_6]
PartitionCols:UDFToDouble(_col0)
@@ -5757,10 +5757,10 @@ Stage-0
Select Operator [SEL_7] (rows=1 width=404)
Output:["_col0","_col1","_col2"]
Map Join Operator [MAPJOIN_17] (rows=1 width=404)
- Conds:SEL_1.UDFToDouble(_col0)=RS_5.(UDFToDouble(_col0) + 1.0)(Left Outer),Output:["_col0","_col1","_col2"]
+ Conds:SEL_1.UDFToDouble(_col0)=RS_5.(UDFToDouble(_col0) + 1.0D)(Left Outer),Output:["_col0","_col1","_col2"]
<-Map 4 [BROADCAST_EDGE] llap
BROADCAST [RS_5]
- PartitionCols:(UDFToDouble(_col0) + 1.0)
+ PartitionCols:(UDFToDouble(_col0) + 1.0D)
Select Operator [SEL_3] (rows=1 width=184)
Output:["_col0"]
TableScan [TS_2] (rows=1 width=184)
@@ -5799,7 +5799,7 @@ Stage-0
Select Operator [SEL_7] (rows=1 width=404)
Output:["_col0","_col1","_col2"]
Map Join Operator [MAPJOIN_17] (rows=1 width=404)
- Conds:RS_4.UDFToDouble(_col0)=SEL_3.(UDFToDouble(_col0) + 1.0)(Right Outer),Output:["_col0","_col1","_col2"]
+ Conds:RS_4.UDFToDouble(_col0)=SEL_3.(UDFToDouble(_col0) + 1.0D)(Right Outer),Output:["_col0","_col1","_col2"]
<-Map 1 [BROADCAST_EDGE] llap
BROADCAST [RS_4]
PartitionCols:UDFToDouble(_col0)
@@ -5841,7 +5841,7 @@ Stage-0
Select Operator [SEL_7] (rows=1 width=404)
Output:["_col0","_col1","_col2"]
Merge Join Operator [MERGEJOIN_15] (rows=1 width=404)
- Conds:RS_4.UDFToDouble(_col0)=RS_5.(UDFToDouble(_col0) + 1.0)(Outer),Output:["_col0","_col1","_col2"]
+ Conds:RS_4.UDFToDouble(_col0)=RS_5.(UDFToDouble(_col0) + 1.0D)(Outer),Output:["_col0","_col1","_col2"]
<-Map 1 [SIMPLE_EDGE] llap
SHUFFLE [RS_4]
PartitionCols:UDFToDouble(_col0)
@@ -5851,7 +5851,7 @@ Stage-0
default@t1,a,Tbl:COMPLETE,Col:NONE,Output:["key","val"]
<-Map 5 [SIMPLE_EDGE] llap
SHUFFLE [RS_5]
- PartitionCols:(UDFToDouble(_col0) + 1.0)
+ PartitionCols:(UDFToDouble(_col0) + 1.0D)
Select Operator [SEL_3] (rows=1 width=184)
Output:["_col0"]
TableScan [TS_2] (rows=1 width=184)
@@ -5886,7 +5886,7 @@ Stage-0
Select Operator [SEL_7] (rows=1 width=202)
Output:["_col0","_col1"]
Map Join Operator [MAPJOIN_17] (rows=1 width=202)
- Conds:SEL_1.(UDFToDouble(_col0) + 1.0)=RS_5.UDFToDouble(_col0)(Left Outer),Output:["_col0","_col2"]
+ Conds:SEL_1.(UDFToDouble(_col0) + 1.0D)=RS_5.UDFToDouble(_col0)(Left Outer),Output:["_col0","_col2"]
<-Map 4 [BROADCAST_EDGE] llap
BROADCAST [RS_5]
PartitionCols:UDFToDouble(_col0)
diff --git a/ql/src/test/results/clientpositive/llap/groupby_rollup_empty.q.out b/ql/src/test/results/clientpositive/llap/groupby_rollup_empty.q.out
index 24be36ea36..364c15a05b 100644
--- a/ql/src/test/results/clientpositive/llap/groupby_rollup_empty.q.out
+++ b/ql/src/test/results/clientpositive/llap/groupby_rollup_empty.q.out
@@ -175,7 +175,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col2)
- keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+ keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/llap/having.q.out b/ql/src/test/results/clientpositive/llap/having.q.out
index 75de96ab59..7efb72154b 100644
--- a/ql/src/test/results/clientpositive/llap/having.q.out
+++ b/ql/src/test/results/clientpositive/llap/having.q.out
@@ -51,7 +51,7 @@ STAGE PLANS:
outputColumnNames: _col1
Statistics: Num rows: 250 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col1 > 3) (type: boolean)
+ predicate: (_col1 > 3L) (type: boolean)
Statistics: Num rows: 83 Data size: 664 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: bigint)
@@ -111,7 +111,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (UDFToDouble(key) <> 302.0) (type: boolean)
+ predicate: (UDFToDouble(key) <> 302.0D) (type: boolean)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: max(value)
@@ -764,7 +764,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (UDFToDouble(key) > 300.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 300.0D) (type: boolean)
Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: max(value)
@@ -1264,7 +1264,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col1 >= 4) (type: boolean)
+ predicate: (_col1 >= 4L) (type: boolean)
Statistics: Num rows: 83 Data size: 7885 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
diff --git a/ql/src/test/results/clientpositive/llap/intersect_all.q.out b/ql/src/test/results/clientpositive/llap/intersect_all.q.out
index 20a234e53b..9c67a99bb5 100644
--- a/ql/src/test/results/clientpositive/llap/intersect_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/intersect_all.q.out
@@ -232,7 +232,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 250 Data size: 48500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col3 = 2) (type: boolean)
+ predicate: (_col3 = 2L) (type: boolean)
Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col2 (type: bigint), _col0 (type: string), _col1 (type: string)
@@ -954,7 +954,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 500 Data size: 97000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col3 = 4) (type: boolean)
+ predicate: (_col3 = 4L) (type: boolean)
Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col2 (type: bigint), _col0 (type: string), _col1 (type: string)
@@ -1629,7 +1629,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 2) (type: boolean)
+ predicate: (_col2 = 2L) (type: boolean)
Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: bigint), _col0 (type: int)
diff --git a/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out b/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out
index a5384eb196..444c222da4 100644
--- a/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out
+++ b/ql/src/test/results/clientpositive/llap/intersect_distinct.q.out
@@ -230,7 +230,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 250 Data size: 46500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 2) (type: boolean)
+ predicate: (_col2 = 2L) (type: boolean)
Statistics: Num rows: 1 Data size: 186 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
@@ -754,7 +754,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 500 Data size: 93000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 4) (type: boolean)
+ predicate: (_col2 = 4L) (type: boolean)
Statistics: Num rows: 1 Data size: 186 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
@@ -1231,7 +1231,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col1 = 2) (type: boolean)
+ predicate: (_col1 = 2L) (type: boolean)
Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int)
diff --git a/ql/src/test/results/clientpositive/llap/intersect_merge.q.out b/ql/src/test/results/clientpositive/llap/intersect_merge.q.out
index 6f0e7f1f66..f69d074bef 100644
--- a/ql/src/test/results/clientpositive/llap/intersect_merge.q.out
+++ b/ql/src/test/results/clientpositive/llap/intersect_merge.q.out
@@ -243,7 +243,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 5) (type: boolean)
+ predicate: (_col2 = 5L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
@@ -471,7 +471,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 4) (type: boolean)
+ predicate: (_col2 = 4L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
@@ -744,7 +744,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 5) (type: boolean)
+ predicate: (_col2 = 5L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
@@ -1017,7 +1017,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 5) (type: boolean)
+ predicate: (_col2 = 5L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
@@ -1290,7 +1290,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 5) (type: boolean)
+ predicate: (_col2 = 5L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
@@ -1473,7 +1473,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 3) (type: boolean)
+ predicate: (_col2 = 3L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
@@ -1656,7 +1656,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col3 = 3) (type: boolean)
+ predicate: (_col3 = 3L) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col2 (type: bigint), _col0 (type: int), _col1 (type: int)
@@ -1830,7 +1830,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col3 = 2) (type: boolean)
+ predicate: (_col3 = 2L) (type: boolean)
Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col2 (type: bigint), _col0 (type: int), _col1 (type: int)
@@ -1881,7 +1881,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: (_col2 = 2) (type: boolean)
+ predicate: (_col2 = 2L) (type: boolean)
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: int)
diff --git a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
index 3ef7801863..c5802742e5 100644
--- a/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
+++ b/ql/src/test/results/clientpositive/llap/jdbc_handler.q.out
@@ -144,7 +144,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
- predicate: ((UDFToDouble(key) - 1.0) = 1.0) (type: boolean)
+ predicate: ((UDFToDouble(key) - 1.0D) = 1.0D) (type: boolean)
Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
keys: key (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
index 72d3d398c2..fe6b4f96e0 100644
--- a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
@@ -196,7 +196,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -292,7 +292,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
index 051b2dbe1f..97686cb5d1 100644
--- a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
@@ -197,7 +197,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
@@ -307,7 +307,7 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
diff --git a/ql/src/test/results/clientpositive/llap/lineage2.q.out b/ql/src/test/results/clientpositive/llap/lineage2.q.out
index 4f59a4d59b..4fb586d58a 100644
--- a/ql/src/test/results/clientpositive/llap/lineage2.q.out
+++ b/ql/src/test/results/clientpositive/llap/lineage2.q.out
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
PREHOOK: type: QUERY
PREHOOK: Input: default@src1
#### A masked pattern was here ####
-{"version":"1.0","engine":"tez","database":"default","hash":"e07e602503383cf2b8477d43c5043f35","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > 10.0) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"tez","database":"default","hash":"e07e602503383cf2b8477d43c5043f35","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > 10.0D) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
146 val_146
150 val_150
213 val_213
@@ -634,7 +634,7 @@ having count(a.c2) > 0
PREHOOK: type: QUERY
PREHOOK: Input: default@dest_l2
#### A masked pattern was here ####
-{"version":"1.0","engine":"tez","database":"default","hash":"4e60ca1e72d985639b2027021a199297","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (.
(tok_table_or_col $hdt$_0) id))))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"sum_window_0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"4e60ca1e72d985639b2027021a199297","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. (tok_table_or_col $hdt$_0) id))))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0L)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"sum_window_0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]} 1 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3 from dest_l2 a join dest_l3 b on (a.id = b.id) @@ -646,7 +646,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@dest_l2 PREHOOK: Input: default@dest_l3 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"0dc990e844acc6c8309f674a4ca281d2","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and a.id is not null)","edgeType":"PREDICATE"},{"sources":[9,10],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,2,3],"expression":"((b.c3 > 0) and b.id is not null)","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 
0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"0dc990e844acc6c8309f674a4ca281d2","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,9],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and a.id is not null)","edgeType":"PREDICATE"},{"sources":[9,10],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,2,3],"expression":"((b.c3 > 0) and b.id is not null)","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0L)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"}]} 1 1 s2 15 PREHOOK: query: drop table if exists t PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/llap/lineage3.q.out b/ql/src/test/results/clientpositive/llap/lineage3.q.out index ca4405caf7..d1e321e1f4 100644 --- a/ql/src/test/results/clientpositive/llap/lineage3.q.out +++ b/ql/src/test/results/clientpositive/llap/lineage3.q.out @@ -25,7 +25,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Output: default@d1 PREHOOK: Output: default@d2 -{"version":"1.0","engine":"tez","database":"default","hash":"2a73773c7e9a8a03f68ce31ed3972ed0","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1],"expression":"a.cint is not null","edgeType":"PREDICATE"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"b.cbigint is not 
null","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"compute_stats(UDFToInteger(x), 'hll')","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"2a73773c7e9a8a03f68ce31ed3972ed0","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1],"expression":"a.cint is not null","edgeType":"PREDICATE"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"b.cbigint is not null","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0Y)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"compute_stats(UDFToInteger(x), 'hll')","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]} PREHOOK: query: drop table if exists t PREHOOK: type: DROPTABLE PREHOOK: query: create table t as @@ -61,7 +61,7 @@ having min(cbigint) > 10 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Output: default@dest_l1@ds=tomorrow -{"version":"1.0","engine":"tez","database":"default","hash":"d9978d282bef250892cc64adb1c06356","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[3],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[5,3],"targets":[0,1,2],"expression":"(alltypesorc.cboolean1 and alltypesorc.cint is not null)","edgeType":"PREDICATE"},{"sources":[6],"targets":[0,1,2],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"},{"sources":[3],"targets":[0],"expression":"compute_stats(min(default.alltypesorc.cint), 'hll')","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"compute_stats(CAST( min(default.alltypesorc.cstring1) AS varchar(128)), 
'hll')","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"'tomorrow'","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l1.ds"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"d9978d282bef250892cc64adb1c06356","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[3],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[5,3],"targets":[0,1,2],"expression":"(alltypesorc.cboolean1 and alltypesorc.cint is not null)","edgeType":"PREDICATE"},{"sources":[6],"targets":[0,1,2],"expression":"(min(default.alltypesorc.cbigint) > 10L)","edgeType":"PREDICATE"},{"sources":[3],"targets":[0],"expression":"compute_stats(min(default.alltypesorc.cint), 'hll')","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"compute_stats(CAST( min(default.alltypesorc.cstring1) AS varchar(128)), 'hll')","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"'tomorrow'","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l1.ds"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]} PREHOOK: query: select cint, rank() over(order by cint) from alltypesorc where cint > 10 and cint < 10000 limit 10 PREHOOK: type: QUERY @@ -116,7 +116,7 @@ order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"fd17992d1c081f6aa3cd7d5c99799748","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n (select ctinyint, cbigint from alltypesorc\n union all\n select ctinyint, cbigint from alltypesorc) a\n inner join\n alltypesorc b\n on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,4],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = 
b.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((b.ctinyint < 100) and b.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"fd17992d1c081f6aa3cd7d5c99799748","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n (select ctinyint, cbigint from alltypesorc\n union all\n select ctinyint, cbigint from alltypesorc) a\n inner join\n alltypesorc b\n on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,4],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100Y) and alltypesorc.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = b.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((b.ctinyint < 100Y) and b.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]} -2147311592 -51 -1071480828 -51 -2147311592 -51 -1071480828 -51 -2147311592 -51 -1067683781 -51 @@ -135,7 +135,7 @@ and x.ctinyint + length(c.cstring2) < 1000 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"15e00f9e88c1ad6b2f53a33a0c147f0e","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n select a.ctinyint ctinyint, b.cint cint\n from (select * from alltypesorc a where cboolean1=false) a\n join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - 100)","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(CAST( c.cint AS decimal(11,1)) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((CAST( b.cint AS decimal(11,1)) < 4.5) and b.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((b.cbigint - 224870380) = 
UDFToLong(a.cint))","edgeType":"PREDICATE"},{"sources":[8,4,5],"targets":[0,1,2,3],"expression":"((a.cboolean1 = false) and (a.ctinyint > 10) and a.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"_c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"15e00f9e88c1ad6b2f53a33a0c147f0e","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n select a.ctinyint ctinyint, b.cint cint\n from (select * from alltypesorc a where cboolean1=false) a\n join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - 100L)","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(CAST( c.cint AS decimal(11,1)) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((CAST( b.cint AS decimal(11,1)) < 4.5) and b.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((b.cbigint - 224870380) = UDFToLong(a.cint))","edgeType":"PREDICATE"},{"sources":[8,4,5],"targets":[0,1,2,3],"expression":"((a.cboolean1 = false) and (a.ctinyint > 10Y) and a.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"_c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]} 11 -654374827 857266369 OEfPnHnIYueoup PREHOOK: query: select c1, x2, x3 from ( @@ -158,7 +158,7 @@ order by x2, c1 desc PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"0b26439f53dcab4e9429ed292244c371","queryText":"select c1, x2, x3\nfrom (\n select c1, min(c2) x2, sum(c3) x3\n from (\n select c1, c2, c3\n from (\n select cint c1, ctinyint c2, min(cbigint) c3\n from alltypesorc\n where cint is not null\n group by cint, ctinyint\n order by cint, ctinyint\n limit 5\n ) x\n ) x2\n group by c1\n) y\nwhere x2 > 0\norder by x2, c1 
desc","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"expression":"sum(min(default.alltypesorc.cbigint))","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1,2],"expression":"alltypesorc.cint is not null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2],"expression":"(min(default.alltypesorc.ctinyint) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c1"},{"id":1,"vertexType":"COLUMN","vertexId":"x2"},{"id":2,"vertexType":"COLUMN","vertexId":"x3"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"0b26439f53dcab4e9429ed292244c371","queryText":"select c1, x2, x3\nfrom (\n select c1, min(c2) x2, sum(c3) x3\n from (\n select c1, c2, c3\n from (\n select cint c1, ctinyint c2, min(cbigint) c3\n from alltypesorc\n where cint is not null\n group by cint, ctinyint\n order by cint, ctinyint\n limit 5\n ) x\n ) x2\n group by c1\n) y\nwhere x2 > 0\norder by x2, c1 desc","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"min(default.alltypesorc.ctinyint)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"expression":"sum(min(default.alltypesorc.cbigint))","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1,2],"expression":"alltypesorc.cint is not null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1,2],"expression":"(min(default.alltypesorc.ctinyint) > 0Y)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c1"},{"id":1,"vertexType":"COLUMN","vertexId":"x2"},{"id":2,"vertexType":"COLUMN","vertexId":"x3"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]} -1072910839 11 2048385991 -1073279343 11 -1595604468 PREHOOK: query: select key, value from src1 @@ -166,7 +166,7 @@ where key in (select key+18 from src1) order by key PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"06c63ecdebcc1ca975a34b0fe1b4bf38","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"src1.key is not null","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"06c63ecdebcc1ca975a34b0fe1b4bf38","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"src1.key is not 
null","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0D))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]} 146 val_146 273 val_273 PREHOOK: query: select * from src1 a @@ -178,7 +178,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"94e9cc0a67801fe1503a3cb0c5029d59","queryText":"select * from src1 a\nwhere exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(a.key = a.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(UDFToDouble((UDFToInteger(b.ctinyint) + 300)) = UDFToDouble(a.key))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"94e9cc0a67801fe1503a3cb0c5029d59","queryText":"select * from src1 a\nwhere exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0D)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(a.key = a.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(UDFToDouble((UDFToInteger(b.ctinyint) + 300)) = UDFToDouble(a.key))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 311 val_311 Warning: Shuffle Join MERGEJOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: select key, value from src1 @@ -186,7 +186,7 @@ where key not in (select key+18 from src1) order by key PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"cbc4367150807328dda0f1cf4c74b811","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0))","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"((count(*) = 0) or (true is null and src1.key is not null and (count((UDFToDouble(src1.key) + 18.0)) >= 
count(*))))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"cbc4367150807328dda0f1cf4c74b811","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0D))","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"((count(*) = 0L) or (true is null and src1.key is not null and (count((UDFToDouble(src1.key) + 18.0D)) >= count(*))))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]} PREHOOK: query: select * from src1 a where not exists (select cint from alltypesorc b @@ -196,7 +196,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src1 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"723e79692e1de404c4ffb702097586da","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(a.key = a.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(UDFToDouble((UDFToInteger(b.ctinyint) + 300)) = UDFToDouble(a.key))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"723e79692e1de404c4ffb702097586da","queryText":"select * from src1 a\nwhere not exists\n (select cint from alltypesorc b\n where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0D)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(a.key = a.key)","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(UDFToDouble((UDFToInteger(b.ctinyint) + 300)) = UDFToDouble(a.key))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true is 
null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 369 401 val_401 406 val_406 @@ -251,7 +251,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@dest_v1 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"2baf9018d042c81043d25d70b4411308","queryText":"select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t\nwhere ctinyint > 10 order by ctinyint limit 2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"},{"sources":[1],"targets":[0],"expression":"(alltypesorc.ctinyint > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"t.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"2baf9018d042c81043d25d70b4411308","queryText":"select t.ctinyint from (select * from dest_v1 where ctinyint is not null) t\nwhere ctinyint > 10 order by ctinyint limit 2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"},{"sources":[1],"targets":[0],"expression":"(alltypesorc.ctinyint > 10Y)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"t.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]} 11 11 PREHOOK: query: drop view if exists dest_v2 @@ -297,7 +297,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@dest_v3 -{"version":"1.0","engine":"tez","database":"default","hash":"9848a9a38a4f6f031dc669e7e495f9ee","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n from alltypesorc c\n join (\n select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n from ( select * from alltypesorc a where cboolean1=true ) a\n join alltypesorc b on (a.csmallint = b.cint)\n ) x on (x.ctinyint = c.cbigint)\n where x.csmallint=11\n and x.cint > 899\n and x.cfloat > 4.5\n and c.cstring1 < '7'\n and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[],"targets":[0],"expression":"11","edgeType":"PROJECTION"},{"sources":[7],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[11],"targets":[6],"edgeType":"PROJECTION"},{"sources":[11,7],"targets":[0,1,3,2,4,5,6],"expression":"((c.cstring1 < '7') and c.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[7,8],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(a.ctinyint))","edgeType":"PREDICATE"},{"sources":[10,9],"targets":[0,1,3,2,4,5,6],"expression":"((b.cfloat > 4.5) and (11 = b.cint))","edgeType":"PREDICATE"},{"sources":[12,13,9,8],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 and (a.csmallint = 11) and (a.cint > 899) and a.ctinyint is not 
null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"9848a9a38a4f6f031dc669e7e495f9ee","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n from alltypesorc c\n join (\n select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n from ( select * from alltypesorc a where cboolean1=true ) a\n join alltypesorc b on (a.csmallint = b.cint)\n ) x on (x.ctinyint = c.cbigint)\n where x.csmallint=11\n and x.cint > 899\n and x.cfloat > 4.5\n and c.cstring1 < '7'\n and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[],"targets":[0],"expression":"11S","edgeType":"PROJECTION"},{"sources":[7],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[3],"edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[11],"targets":[6],"edgeType":"PROJECTION"},{"sources":[11,7],"targets":[0,1,3,2,4,5,6],"expression":"((c.cstring1 < '7') and c.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[7,8],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(a.ctinyint))","edgeType":"PREDICATE"},{"sources":[10,9],"targets":[0,1,3,2,4,5,6],"expression":"((b.cfloat > 4.5) and (11 = b.cint))","edgeType":"PREDICATE"},{"sources":[12,13,9,8],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 and (a.csmallint = 11S) and (a.cint > 899) and a.ctinyint is not 
null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"}]} PREHOOK: query: alter view dest_v3 as select * from ( select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a, @@ -311,13 +311,13 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@dest_v3 -{"version":"1.0","engine":"tez","database":"default","hash":"aaef165dff0212060ba950bfed68061d","queryText":"alter view dest_v3 as\n select * from (\n select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n count(b.cstring1) x, b.cboolean1\n from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n where a.cboolean2 = true and b.cfloat > 0\n group by a.ctinyint, a.csmallint, b.cboolean1\n having count(a.cint) > 10\n order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. 
(tok_table_or_col a) csmallint)))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"aaef165dff0212060ba950bfed68061d","queryText":"alter view dest_v3 as\n select * from (\n select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n count(b.cstring1) x, b.cboolean1\n from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n where a.cboolean2 = true and b.cfloat > 0\n group by a.ctinyint, a.csmallint, b.cboolean1\n having count(a.cint) > 10\n order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. 
(tok_table_or_col a) csmallint)))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10L)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} PREHOOK: query: select * from dest_v3 limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@dest_v3 #### A masked pattern was here #### -{"version":"1.0","engine":"tez","database":"default","hash":"fd4e0dd59f42b53fc07125817451df49","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. (tok_table_or_col $hdt$_0) csmallint))))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8,7],"targets":[0,1,2],"expression":"(a.cboolean2 and a.cint is not null)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[9,7],"targets":[0,1,2],"expression":"((b.cfloat > 0) and b.cint is not null)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} +{"version":"1.0","engine":"tez","database":"default","hash":"fd4e0dd59f42b53fc07125817451df49","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. 
(tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. (tok_table_or_col $hdt$_0) csmallint))))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8,7],"targets":[0,1,2],"expression":"(a.cboolean2 and a.cint is not null)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[9,7],"targets":[0,1,2],"expression":"((b.cfloat > 0) and b.cint is not null)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10L)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} 38 216 false 38 229 true PREHOOK: query: drop table if exists src_dp diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out index 768b8048c0..3236d00e2c 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb.q.out @@ -690,7 +690,7 @@ STAGE PLANS: alias: default.ssb_mv Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((d_year = 1993) and (lo_quantity < 25.0) and lo_discount BETWEEN 1.0 AND 3.0) (type: boolean) + predicate: ((d_year = 1993) and (lo_quantity < 25.0D) and lo_discount BETWEEN 1.0D AND 3.0D) (type: boolean) Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: discounted_price (type: double) @@ -769,7 +769,7 @@ STAGE PLANS: alias: default.ssb_mv Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((d_yearmonthnum = 199401) and lo_discount BETWEEN 4.0 AND 6.0 and lo_quantity BETWEEN 26.0 AND 35.0) (type: boolean) + predicate: ((d_yearmonthnum = 199401) and lo_discount BETWEEN 4.0D AND 6.0D and lo_quantity BETWEEN 26.0D AND 35.0D) (type: boolean) Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: discounted_price (type: double) @@ -850,7 +850,7 @@ STAGE PLANS: alias: default.ssb_mv Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((d_weeknuminyear = 6) and (d_year = 1994) and lo_discount BETWEEN 5.0 AND 7.0 and lo_quantity BETWEEN 26.0 AND 35.0) (type: boolean) + predicate: ((d_weeknuminyear = 6) and (d_year = 1994) and lo_discount BETWEEN 5.0D AND 7.0D and lo_quantity BETWEEN 26.0D AND 35.0D) (type: boolean) Statistics: Num 
rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: discounted_price (type: double) diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out index 6c7e445110..36ecc1388a 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_ssb_2.q.out @@ -692,7 +692,7 @@ STAGE PLANS: alias: default.ssb_mv Statistics: Num rows: 1 Data size: 560 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(lo_quantity) < 25.0) and (UDFToInteger(d_year) = 1993) and UDFToDouble(lo_discount) BETWEEN 1.0 AND 3.0) (type: boolean) + predicate: ((UDFToDouble(lo_quantity) < 25.0D) and (UDFToInteger(d_year) = 1993) and UDFToDouble(lo_discount) BETWEEN 1.0D AND 3.0D) (type: boolean) Statistics: Num rows: 1 Data size: 560 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: discounted_price (type: double) @@ -771,7 +771,7 @@ STAGE PLANS: alias: default.ssb_mv Statistics: Num rows: 1 Data size: 560 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToInteger(d_yearmonthnum) = 199401) and UDFToDouble(lo_discount) BETWEEN 4.0 AND 6.0 and UDFToDouble(lo_quantity) BETWEEN 26.0 AND 35.0) (type: boolean) + predicate: ((UDFToInteger(d_yearmonthnum) = 199401) and UDFToDouble(lo_discount) BETWEEN 4.0D AND 6.0D and UDFToDouble(lo_quantity) BETWEEN 26.0D AND 35.0D) (type: boolean) Statistics: Num rows: 1 Data size: 560 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: discounted_price (type: double) @@ -852,7 +852,7 @@ STAGE PLANS: alias: default.ssb_mv Statistics: Num rows: 1 Data size: 744 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToInteger(d_weeknuminyear) = 6) and (UDFToInteger(d_year) = 1994) and UDFToDouble(lo_discount) BETWEEN 5.0 AND 7.0 and UDFToDouble(lo_quantity) BETWEEN 26.0 AND 35.0) (type: boolean) + predicate: ((UDFToInteger(d_weeknuminyear) = 6) and (UDFToInteger(d_year) = 1994) and UDFToDouble(lo_discount) BETWEEN 5.0D AND 7.0D and UDFToDouble(lo_quantity) BETWEEN 26.0D AND 35.0D) (type: boolean) Statistics: Num rows: 1 Data size: 744 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: discounted_price (type: double) diff --git a/ql/src/test/results/clientpositive/llap/mrr.q.out b/ql/src/test/results/clientpositive/llap/mrr.q.out index bfa26e4ff8..4fff286751 100644 --- a/ql/src/test/results/clientpositive/llap/mrr.q.out +++ b/ql/src/test/results/clientpositive/llap/mrr.q.out @@ -1394,7 +1394,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 83 Data size: 7885 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/multi_count_distinct_null.q.out b/ql/src/test/results/clientpositive/llap/multi_count_distinct_null.q.out index 66bf74f15a..c210c4c20d 100644 --- a/ql/src/test/results/clientpositive/llap/multi_count_distinct_null.q.out +++ b/ql/src/test/results/clientpositive/llap/multi_count_distinct_null.q.out @@ -48,7 +48,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 1023 
Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: _col0 (type: int), _col1 (type: varchar(10)), _col2 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: varchar(10)), _col2 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 1628 Basic stats: COMPLETE Column stats: COMPLETE @@ -68,7 +68,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 1628 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: CASE WHEN (((_col3 = 3) and _col0 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 5) and _col1 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 6) and _col2 is not null)) THEN (1) ELSE (null) END (type: int) + expressions: CASE WHEN (((_col3 = 3L) and _col0 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 5L) and _col1 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 6L) and _col2 is not null)) THEN (1) ELSE (null) END (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 18 Data size: 1628 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -189,7 +189,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 1023 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: _col0 (type: varchar(10)), _col1 (type: int), _col2 (type: int), 0 (type: bigint) + keys: _col0 (type: varchar(10)), _col1 (type: int), _col2 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 2654 Basic stats: COMPLETE Column stats: COMPLETE @@ -209,7 +209,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 30 Data size: 2654 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: CASE WHEN (((_col3 = 3) and _col0 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 5) and _col1 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 6) and _col2 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN ((_col3 = 4)) THEN (1) ELSE (null) END (type: int), CASE WHEN ((_col3 = 0)) THEN (1) ELSE (null) END (type: int) + expressions: CASE WHEN (((_col3 = 3L) and _col0 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 5L) and _col1 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN (((_col3 = 6L) and _col2 is not null)) THEN (1) ELSE (null) END (type: int), CASE WHEN ((_col3 = 4L)) THEN (1) ELSE (null) END (type: int), CASE WHEN ((_col3 = 0L)) THEN (1) ELSE (null) END (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 30 Data size: 2654 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator diff --git a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out index ec3c286102..cce6bc3c0a 100644 --- a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out +++ b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out @@ -198,7 +198,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double) 
outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -295,7 +295,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator diff --git a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out index 6eba365ff2..c1815e85aa 100644 --- a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out @@ -44,10 +44,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) @@ -174,10 +174,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out index 1583007d7b..a340b9774d 100644 --- a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out @@ -44,10 +44,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) @@ -227,10 +227,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git 
a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out index 0e9b34f8ec..1480933d6d 100644 --- a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out @@ -46,7 +46,7 @@ STAGE PLANS: alias: orc_merge5 Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out index 9f20f77cb0..2a1fd8093f 100644 --- a/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out @@ -323,7 +323,7 @@ STAGE PLANS: alias: orc_pred Statistics: Num rows: 1049 Data size: 4188 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToInteger(t) > -2) and (t < 0)) (type: boolean) + predicate: ((UDFToInteger(t) > -2) and (t < 0Y)) (type: boolean) Statistics: Num rows: 116 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: hash(t) (type: int) @@ -388,10 +388,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_pred - filterExpr: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean) + filterExpr: ((t < 0Y) and (UDFToInteger(t) > -2)) (type: boolean) Statistics: Num rows: 1049 Data size: 4188 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToInteger(t) > -2) and (t < 0)) (type: boolean) + predicate: ((UDFToInteger(t) > -2) and (t < 0Y)) (type: boolean) Statistics: Num rows: 116 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: hash(t) (type: int) @@ -485,7 +485,7 @@ STAGE PLANS: Filter Operator predicate: ((s like 'bob%') and (t = -1) and s is not null) (type: boolean) Select Operator - expressions: -1 (type: tinyint), s (type: string) + expressions: -1Y (type: tinyint), s (type: string) outputColumnNames: _col0, _col1 ListSink @@ -513,7 +513,7 @@ STAGE PLANS: Filter Operator predicate: ((s like 'bob%') and (t = -1) and s is not null) (type: boolean) Select Operator - expressions: -1 (type: tinyint), s (type: string) + expressions: -1Y (type: tinyint), s (type: string) outputColumnNames: _col0, _col1 ListSink @@ -788,7 +788,7 @@ STAGE PLANS: alias: orc_pred Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10.0) and (not (s like '%car%')) and (s like '%son') and (t > 0) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10.0D) and (not (s like '%car%')) and (s like '%son') and (t > 0Y) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) @@ -864,10 +864,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_pred - filterExpr: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and UDFToInteger(si) BETWEEN 300 
AND 400 and (not (s like '%car%'))) (type: boolean) + filterExpr: ((d >= 10.0D) and (d < 12.0D) and (s like '%son') and (t > 0Y) and UDFToInteger(si) BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean) Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10.0) and (not (s like '%car%')) and (s like '%son') and (t > 0) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10.0D) and (not (s like '%car%')) and (s like '%son') and (t > 0Y) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) @@ -1012,7 +1012,7 @@ STAGE PLANS: alias: orc_pred Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101) and (t > 0) and (t > 10) and si BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101Y) and (t > 0Y) and (t > 10Y) and si BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) @@ -1109,10 +1109,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_pred - filterExpr: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean) + filterExpr: ((t > 10Y) and (t <> 101Y) and (d >= 10) and (d < 12.0D) and (s like '%son') and (not (s like '%car%')) and (t > 0Y) and si BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101) and (t > 0) and (t > 10) and si BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101Y) and (t > 0Y) and (t > 10Y) and si BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) diff --git a/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out index 6885346539..3f91c5ca91 100644 --- a/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out @@ -328,7 +328,7 @@ STAGE PLANS: TableScan alias: tbl_pred Filter Operator - predicate: ((UDFToInteger(t) > -2) and (t < 0)) (type: boolean) + predicate: ((UDFToInteger(t) > -2) and (t < 0Y)) (type: boolean) Select Operator expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 @@ -358,9 +358,9 @@ STAGE PLANS: Processor Tree: TableScan alias: 
tbl_pred - filterExpr: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean) + filterExpr: ((t < 0Y) and (UDFToInteger(t) > -2)) (type: boolean) Filter Operator - predicate: ((UDFToInteger(t) > -2) and (t < 0)) (type: boolean) + predicate: ((UDFToInteger(t) > -2) and (t < 0Y)) (type: boolean) Select Operator expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 @@ -423,7 +423,7 @@ STAGE PLANS: Filter Operator predicate: ((s like 'bob%') and (t = -1) and s is not null) (type: boolean) Select Operator - expressions: -1 (type: tinyint), s (type: string) + expressions: -1Y (type: tinyint), s (type: string) outputColumnNames: _col0, _col1 ListSink @@ -451,7 +451,7 @@ STAGE PLANS: Filter Operator predicate: ((s like 'bob%') and (t = -1) and s is not null) (type: boolean) Select Operator - expressions: -1 (type: tinyint), s (type: string) + expressions: -1Y (type: tinyint), s (type: string) outputColumnNames: _col0, _col1 ListSink @@ -726,7 +726,7 @@ STAGE PLANS: alias: tbl_pred Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10.0) and (not (s like '%car%')) and (s like '%son') and (t > 0) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10.0D) and (not (s like '%car%')) and (s like '%son') and (t > 0Y) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) @@ -802,10 +802,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_pred - filterExpr: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and UDFToInteger(si) BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean) + filterExpr: ((d >= 10.0D) and (d < 12.0D) and (s like '%son') and (t > 0Y) and UDFToInteger(si) BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean) Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10.0) and (not (s like '%car%')) and (s like '%son') and (t > 0) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10.0D) and (not (s like '%car%')) and (s like '%son') and (t > 0Y) and UDFToInteger(si) BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) @@ -995,7 +995,7 @@ STAGE PLANS: alias: tbl_pred Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101) and (t > 0) and (t > 10) and si BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101Y) and (t > 0Y) and (t > 10Y) and si BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s 
(type: string) @@ -1092,10 +1092,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_pred - filterExpr: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean) + filterExpr: ((t > 10Y) and (t <> 101Y) and (d >= 10) and (d < 12.0D) and (s like '%son') and (not (s like '%car%')) and (t > 0Y) and si BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1049 Data size: 118521 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((d < 12.0) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101) and (t > 0) and (t > 10) and si BETWEEN 300 AND 400) (type: boolean) + predicate: ((d < 12.0D) and (d >= 10) and (not (s like '%car%')) and (s like '%son') and (t <> 101Y) and (t > 0Y) and (t > 10Y) and si BETWEEN 300 AND 400) (type: boolean) Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string) @@ -1204,10 +1204,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_pred - filterExpr: ((f < 123.2) and (f > 1.92) and (f >= 9.99) and f BETWEEN 1.92 AND 123.2 and (i < 67627) and (i > 60627) and (i >= 60626) and i BETWEEN 60626 AND 67627 and (b < 4294967861) and (b > 4294967261) and (b >= 4294967260) and b BETWEEN 4294967261 AND 4294967861) (type: boolean) + filterExpr: ((f < 123.2) and (f > 1.92) and (f >= 9.99) and f BETWEEN 1.92 AND 123.2 and (i < 67627) and (i > 60627) and (i >= 60626) and i BETWEEN 60626 AND 67627 and (b < 4294967861L) and (b > 4294967261L) and (b >= 4294967260L) and b BETWEEN 4294967261L AND 4294967861L) (type: boolean) Statistics: Num rows: 1049 Data size: 16784 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b < 4294967861) and (b > 4294967261) and (b >= 4294967260) and (f < 123.2) and (f > 1.92) and (f >= 9.99) and (i < 67627) and (i > 60627) and (i >= 60626) and b BETWEEN 4294967261 AND 4294967861 and f BETWEEN 1.92 AND 123.2 and i BETWEEN 60626 AND 67627) (type: boolean) + predicate: ((b < 4294967861L) and (b > 4294967261L) and (b >= 4294967260L) and (f < 123.2) and (f > 1.92) and (f >= 9.99) and (i < 67627) and (i > 60627) and (i >= 60626) and b BETWEEN 4294967261L AND 4294967861L and f BETWEEN 1.92 AND 123.2 and i BETWEEN 60626 AND 67627) (type: boolean) Statistics: Num rows: 38 Data size: 608 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: f (type: float), i (type: int), b (type: bigint) diff --git a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out index b4ac5a6062..fb421e2b12 100644 --- a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out +++ b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out @@ -143,13 +143,13 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: key (type: string), (UDFToDouble(value) + 1.0) (type: double) + expressions: key (type: string), (UDFToDouble(value) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: _col0 (type: string), (_col1 + 1.0) (type: double) + key expressions: _col0 (type: string), (_col1 + 1.0D) (type: double) sort order: ++ - 
Map-reduce partition columns: _col0 (type: string), (_col1 + 1.0) (type: double) + Map-reduce partition columns: _col0 (type: string), (_col1 + 1.0D) (type: double) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: double) Execution mode: llap @@ -158,7 +158,7 @@ STAGE PLANS: Execution mode: llap Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: string), (VALUE._col0 + 1.0) (type: double) + expressions: KEY.reducesinkkey0 (type: string), (VALUE._col0 + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -2697,13 +2697,13 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: key (type: string), (UDFToDouble(value) + 1.0) (type: double) + expressions: key (type: string), (UDFToDouble(value) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: _col0 (type: string), (_col1 + 1.0) (type: double) + key expressions: _col0 (type: string), (_col1 + 1.0D) (type: double) sort order: ++ - Map-reduce partition columns: _col0 (type: string), (_col1 + 1.0) (type: double) + Map-reduce partition columns: _col0 (type: string), (_col1 + 1.0D) (type: double) Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: double) Execution mode: llap @@ -2712,7 +2712,7 @@ STAGE PLANS: Execution mode: llap Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: string), (VALUE._col0 + 1.0) (type: double) + expressions: KEY.reducesinkkey0 (type: string), (VALUE._col0 + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator diff --git a/ql/src/test/results/clientpositive/llap/sqlmerge.q.out b/ql/src/test/results/clientpositive/llap/sqlmerge.q.out index 2ff590ea0c..88ea24c215 100644 --- a/ql/src/test/results/clientpositive/llap/sqlmerge.q.out +++ b/ql/src/test/results/clientpositive/llap/sqlmerge.q.out @@ -184,7 +184,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cardinality_violation(_col0) (type: int) diff --git a/ql/src/test/results/clientpositive/llap/subquery_in.q.out b/ql/src/test/results/clientpositive/llap/subquery_in.q.out index b5f9641502..277036a8e5 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_in.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_in.q.out @@ -4866,7 +4866,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col16 Statistics: Num rows: 8 Data size: 5052 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col12 = 0)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col12 = 0L)) THEN (false) WHEN (_col12 is 
null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 4 Data size: 2536 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -5123,7 +5123,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col16 Statistics: Num rows: 8 Data size: 5052 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col12 = 0)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col12 = 0L)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 4 Data size: 2536 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out b/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out index 1c6c1202d4..390caf0a01 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_in_having.q.out @@ -1846,7 +1846,7 @@ STAGE PLANS: 1 Reducer 8 Statistics: Num rows: 1 Data size: 668 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 1 Data size: 668 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) diff --git a/ql/src/test/results/clientpositive/llap/subquery_multi.q.out b/ql/src/test/results/clientpositive/llap/subquery_multi.q.out index de543c98d6..438e44470a 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_multi.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_multi.q.out @@ -380,7 +380,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 1 Data size: 1576 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 1 Data size: 1576 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), 
_col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -588,7 +588,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 1 Data size: 1576 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 1 Data size: 1576 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -825,7 +825,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 1 Data size: 1576 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 1 Data size: 1576 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -1019,7 +1019,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 1 Data size: 1345 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col1 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col1 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 1 Data size: 1345 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -1809,7 +1809,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 16 Data size: 3891 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2266,7 +2266,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 16 Data size: 5484 Basic stats: COMPLETE 
Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 8 Data size: 2742 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2533,7 +2533,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 16 Data size: 3891 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 8 Data size: 1945 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -3006,7 +3006,7 @@ STAGE PLANS: outputColumnNames: _col0, _col2, _col4, _col5, _col7 Statistics: Num rows: 2 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col4 = 0) or (_col7 is null and _col2 is not null and (_col5 >= _col4))) (type: boolean) + predicate: ((_col4 = 0L) or (_col7 is null and _col2 is not null and (_col5 >= _col4))) (type: boolean) Statistics: Num rows: 2 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), 1 (type: int) @@ -3864,7 +3864,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 26 Data size: 16518 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col0 = 3) or CASE WHEN ((_col9 = 0)) THEN (true) WHEN (_col12 is not null) THEN (false) WHEN (_col5 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (true) END) (type: boolean) + predicate: ((_col0 = 3) or CASE WHEN ((_col9 = 0L)) THEN (true) WHEN (_col12 is not null) THEN (false) WHEN (_col5 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (true) END) (type: boolean) Statistics: Num rows: 14 Data size: 8898 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4076,7 +4076,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col4, _col5 Statistics: Num rows: 500 Data size: 52040 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col4 is not null and (_col2 <> 0)) or 
_col1 is not null or _col5 is not null) (type: boolean) + predicate: ((_col4 is not null and (_col2 <> 0L)) or _col1 is not null or _col5 is not null) (type: boolean) Statistics: Num rows: 500 Data size: 52040 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 500 Data size: 52040 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out index 247d8f325c..851a783fd6 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out @@ -103,7 +103,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 500 Data size: 97540 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 500 Data size: 97540 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -388,7 +388,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8 Statistics: Num rows: 26 Data size: 5994 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 3007 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int) @@ -666,7 +666,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 26 Data size: 3674 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 26 Data size: 3674 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) @@ -969,7 +969,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col10 Statistics: Num rows: 8 Data size: 1924 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col6 = 0)) THEN (false) WHEN (_col6 is null) THEN (false) WHEN (_col10 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col7 < _col6)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col6 = 0L)) THEN (false) WHEN (_col6 is null) THEN (false) WHEN (_col10 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col7 < _col6)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 4 Data size: 964 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int) @@ -1517,7 +1517,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 166 Data 
size: 17366 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 166 Data size: 17366 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) @@ -1690,7 +1690,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 26 Data size: 16394 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 8207 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -1870,7 +1870,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 26 Data size: 16542 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 26 Data size: 16542 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2077,7 +2077,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 26 Data size: 16538 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col12 is null and _col0 is not null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col0 is not null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 26 Data size: 16538 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2324,7 +2324,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col7 Statistics: Num rows: 26 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) 
WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 13 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE @@ -2506,7 +2506,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 26 Data size: 16538 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col12 is null and _col7 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col7 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 26 Data size: 16538 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2699,7 +2699,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 26 Data size: 16250 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 8127 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2930,7 +2930,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col16 Statistics: Num rows: 26 Data size: 16374 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col11 = 0)) THEN (false) WHEN (_col11 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col12 < _col11)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col11 = 0L)) THEN (false) WHEN (_col11 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col12 < _col11)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 8187 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -3043,7 +3043,7 @@ STAGE PLANS: predicate: p_brand is not null (type: boolean) Statistics: Num rows: 26 Data size: 5096 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: p_brand (type: string), (UDFToDouble(p_type) + 2.0) (type: double) + expressions: p_brand (type: string), (UDFToDouble(p_type) + 2.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 5096 Basic stats: COMPLETE Column stats: 
COMPLETE Group By Operator @@ -3059,7 +3059,7 @@ STAGE PLANS: Statistics: Num rows: 13 Data size: 1404 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint), _col2 (type: bigint) Select Operator - expressions: (UDFToDouble(p_type) + 2.0) (type: double), p_brand (type: string) + expressions: (UDFToDouble(p_type) + 2.0D) (type: double), p_brand (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -3103,7 +3103,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col4, _col5, _col8 Statistics: Num rows: 26 Data size: 6262 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 3133 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) @@ -3272,7 +3272,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col4, _col5, _col8 Statistics: Num rows: 26 Data size: 6046 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 3025 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) @@ -3522,7 +3522,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 500 Data size: 98620 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 500 Data size: 98620 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) @@ -3811,7 +3811,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col7 Statistics: Num rows: 500 Data size: 97100 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 250 Data size: 48560 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) @@ -4070,7 +4070,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, 
_col12 Statistics: Num rows: 26 Data size: 16542 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 26 Data size: 16542 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4278,7 +4278,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 26 Data size: 16542 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 26 Data size: 16542 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4557,7 +4557,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 500 Data size: 97164 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 500 Data size: 97164 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -5235,7 +5235,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col7 Statistics: Num rows: 26 Data size: 492 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 13 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 13 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE @@ -5458,7 +5458,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 4 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 4 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -5627,7 +5627,7 @@ STAGE PLANS: outputColumnNames: _col0, _col3, _col4, _col7 Statistics: Num rows: 4 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN 
(false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -5908,7 +5908,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col7 Statistics: Num rows: 3 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -6130,7 +6130,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col6 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -6354,7 +6354,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col6 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -6541,7 +6541,7 @@ STAGE PLANS: outputColumnNames: _col0, _col3, _col4, _col7 Statistics: Num rows: 3 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 
48 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -6700,7 +6700,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 3 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -6848,7 +6848,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 3 Data size: 68 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 3 Data size: 68 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) @@ -7033,7 +7033,7 @@ STAGE PLANS: residual filter predicates: {(_col1 > _col6)} Statistics: Num rows: 500 Data size: 104497 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 250 Data size: 52304 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/subquery_null_agg.q.out b/ql/src/test/results/clientpositive/llap/subquery_null_agg.q.out index cff9568406..85a64f1628 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_null_agg.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_null_agg.q.out @@ -125,7 +125,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2 Statistics: Num rows: 1 Data size: 10 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col1 = 0) or _col2 is null) (type: boolean) + predicate: ((_col1 = 0L) or _col2 is null) (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: null (type: void) diff --git a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out index 6a2c635889..0f7681fb52 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out @@ -2838,7 +2838,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col1 = 2) (type: boolean) + predicate: (_col1 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: bigint) @@ -3002,7 +3002,7 @@ STAGE PLANS: outputColumnNames: _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col1 = 2) (type: 
boolean) + predicate: (_col1 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -3374,7 +3374,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 14 Data size: 1787 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 9 Data size: 1148 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -3590,7 +3590,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 14 Data size: 1787 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col9 = 0)) THEN (false) WHEN (_col12 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col9 = 0L)) THEN (false) WHEN (_col12 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 7 Data size: 893 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -6349,7 +6349,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (0.0 = _col1) (type: boolean) + predicate: (0.0D = _col1) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) diff --git a/ql/src/test/results/clientpositive/llap/subquery_select.q.out b/ql/src/test/results/clientpositive/llap/subquery_select.q.out index a399d3e293..abbfffd9be 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_select.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_select.q.out @@ -92,7 +92,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 26 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -282,7 +282,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col7 Statistics: Num rows: 26 Data size: 404 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col1 
(type: int), CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col1 (type: int), CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -478,7 +478,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 26 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (true) WHEN (_col4 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (true) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (true) WHEN (_col4 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (true) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -714,7 +714,7 @@ STAGE PLANS: outputColumnNames: _col1, _col5, _col6, _col9 Statistics: Num rows: 8 Data size: 132 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col1 (type: int), CASE WHEN ((_col5 = 0)) THEN (true) WHEN (_col5 is null) THEN (true) WHEN (_col9 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col6 < _col5)) THEN (null) ELSE (true) END (type: boolean) + expressions: _col1 (type: int), CASE WHEN ((_col5 = 0L)) THEN (true) WHEN (_col5 is null) THEN (true) WHEN (_col9 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col6 < _col5)) THEN (null) ELSE (true) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -2237,7 +2237,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 26 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -3246,7 +3246,7 @@ STAGE PLANS: outputColumnNames: _col0, _col2, _col4, _col5, _col8, _col9, _col10, _col12 Statistics: Num rows: 26 Data size: 3974 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col2 (type: int), (CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (null) ELSE (false) END and CASE WHEN ((_col9 = 0)) THEN (false) WHEN (_col12 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (false) END) (type: boolean) + expressions: _col2 (type: int), (CASE WHEN ((_col4 = 
0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (null) ELSE (false) END and CASE WHEN ((_col9 = 0L)) THEN (false) WHEN (_col12 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (false) END) (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -3587,7 +3587,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 26 Data size: 528 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -3902,7 +3902,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col3 Statistics: Num rows: 26 Data size: 520 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: CASE WHEN ((_col1 > 409437)) THEN (_col2) ELSE (_col3) END (type: double) + expressions: CASE WHEN ((_col1 > 409437L)) THEN (_col2) ELSE (_col3) END (type: double) outputColumnNames: _col0 Statistics: Num rows: 26 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -4098,7 +4098,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: (_col0 - 1) (type: bigint) + expressions: (_col0 - 1L) (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator @@ -4880,7 +4880,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2 Statistics: Num rows: 13 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col2 > 0) (type: boolean) + predicate: (_col2 > 0L) (type: boolean) Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: int) @@ -5277,7 +5277,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2 Statistics: Num rows: 13 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col2 > 0) (type: boolean) + predicate: (_col2 > 0L) (type: boolean) Statistics: Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col1 (type: int) diff --git a/ql/src/test/results/clientpositive/llap/subquery_views.q.out b/ql/src/test/results/clientpositive/llap/subquery_views.q.out index 01a86d1300..01f1252d74 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_views.q.out @@ -234,7 +234,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col4, _col5, _col8 Statistics: Num rows: 87 Data size: 17226 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: CASE WHEN ((_col4 = 0)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END 
(type: boolean) + predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) Statistics: Num rows: 43 Data size: 8514 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -334,7 +334,7 @@ STAGE PLANS: outputColumnNames: _col0, _col4, _col5, _col8 Statistics: Num rows: 87 Data size: 9309 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: CASE WHEN ((_col4 = 0)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) + predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) Statistics: Num rows: 43 Data size: 4601 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/temp_table.q.out b/ql/src/test/results/clientpositive/llap/temp_table.q.out index b72fcfe6a2..b04c643697 100644 --- a/ql/src/test/results/clientpositive/llap/temp_table.q.out +++ b/ql/src/test/results/clientpositive/llap/temp_table.q.out @@ -20,7 +20,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(key) % 2.0) = 0.0) (type: boolean) + predicate: ((UDFToDouble(key) % 2.0D) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string), value (type: string) @@ -93,7 +93,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((UDFToDouble(key) % 2.0) = 1.0) (type: boolean) + predicate: ((UDFToDouble(key) % 2.0D) = 1.0D) (type: boolean) Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out index 642bda2736..c471cd6da7 100644 --- a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out +++ b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out @@ -41,7 +41,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (csmallint < 100) (type: boolean) + predicate: (csmallint < 100S) (type: boolean) Statistics: Num rows: 4096 Data size: 1031250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) @@ -232,7 +232,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (csmallint < 100) (type: boolean) + predicate: (csmallint < 100S) (type: 
boolean) Statistics: Num rows: 4096 Data size: 1031250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) @@ -423,7 +423,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (csmallint < 100) (type: boolean) + predicate: (csmallint < 100S) (type: boolean) Statistics: Num rows: 4096 Data size: 1031250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) diff --git a/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out index 79321c1460..4b7cd4c77b 100644 --- a/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out +++ b/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out @@ -162,7 +162,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator - predicate: ((date < '2014-09-02') and (u <> 0)) (type: boolean) + predicate: ((date < '2014-09-02') and (u <> 0L)) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: u (type: bigint), date (type: string) @@ -203,7 +203,7 @@ STAGE PLANS: alias: y Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator - predicate: ((date < '2014-09-02') and (u <> 0)) (type: boolean) + predicate: ((date < '2014-09-02') and (u <> 0L)) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: u (type: bigint), date (type: string) @@ -229,7 +229,7 @@ STAGE PLANS: alias: z Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator - predicate: ((date < '2014-09-02') and (u <> 0)) (type: boolean) + predicate: ((date < '2014-09-02') and (u <> 0L)) (type: boolean) Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: u (type: bigint), date (type: string) @@ -255,7 +255,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 1 Data size: 560 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator - predicate: ((date < '2014-09-03') and (date >= '2014-03-04') and (u <> 0) and t is not null) (type: boolean) + predicate: ((date < '2014-09-03') and (date >= '2014-03-04') and (u <> 0L) and t is not null) (type: boolean) Statistics: Num rows: 1 Data size: 560 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: t (type: string), st (type: string) diff --git a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out index 1ce7a3a37c..fc8b31545a 100644 --- a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out +++ 
b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out @@ -41,7 +41,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (csmallint < 100) (type: boolean) + predicate: (csmallint < 100S) (type: boolean) Statistics: Num rows: 4096 Data size: 1031250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) @@ -232,7 +232,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (csmallint < 100) (type: boolean) + predicate: (csmallint < 100S) (type: boolean) Statistics: Num rows: 4096 Data size: 1031250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) @@ -423,7 +423,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 12288 Data size: 3093170 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (csmallint < 100) (type: boolean) + predicate: (csmallint < 100S) (type: boolean) Statistics: Num rows: 4096 Data size: 1031250 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean) diff --git a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out index 7f7e78243d..b4594b9952 100644 --- a/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out +++ b/ql/src/test/results/clientpositive/llap/unionDistinct_1.q.out @@ -3758,7 +3758,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k0) <= 50.0) (type: boolean) + predicate: (UDFToDouble(k0) <= 50.0D) (type: boolean) Statistics: Num rows: 166 Data size: 73538 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: k1 (type: string), k2 (type: string), k3 (type: string), k4 (type: string) @@ -3839,7 +3839,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k1) > 20.0) (type: boolean) + predicate: (UDFToDouble(k1) > 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 60092 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: k1 (type: string), k2 (type: string), ds (type: string) @@ -3916,7 +3916,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(k0) > 50.0) and (UDFToDouble(k1) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(k0) > 50.0D) and (UDFToDouble(k1) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 19360 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: k1 
 expressions: k1 (type: string), k3 (type: string), k4 (type: string)
@@ -5051,7 +5051,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -5131,7 +5131,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18093 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
@@ -5209,7 +5209,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -5289,7 +5289,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -5548,7 +5548,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -5628,7 +5628,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -5708,7 +5708,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18093 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -5783,7 +5783,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -6017,7 +6017,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -6097,7 +6097,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18093 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -6172,7 +6172,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18880 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), count (type: bigint)
@@ -6252,7 +6252,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 103 Data size: 18093 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -8005,7 +8005,7 @@ STAGE PLANS:
 alias: a
 Statistics: Num rows: 500 Data size: 87584 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
-predicate: (UDFToDouble(key) = 97.0) (type: boolean)
+predicate: (UDFToDouble(key) = 97.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 43792 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -8022,7 +8022,7 @@ STAGE PLANS:
 alias: dim_pho
 Statistics: Num rows: 500 Data size: 175168 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
-predicate: (UDFToDouble(key) = 97.0) (type: boolean)
+predicate: (UDFToDouble(key) = 97.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 87584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -8044,7 +8044,7 @@ STAGE PLANS:
 alias: jackson_sev_add
 Statistics: Num rows: 500 Data size: 175168 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
-predicate: (UDFToDouble(key) = 97.0) (type: boolean)
+predicate: (UDFToDouble(key) = 97.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 87584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -11120,7 +11120,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+predicate: (UDFToDouble(key) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -11342,7 +11342,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+predicate: (UDFToDouble(key) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out b/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out
index 146cc62d70..caf9ce8cb9 100644
--- a/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/unionDistinct_3.q.out
@@ -1692,7 +1692,7 @@ STAGE PLANS:
 alias: s4
 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -1716,7 +1716,7 @@ STAGE PLANS:
 alias: s2
 Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/union_top_level.q.out b/ql/src/test/results/clientpositive/llap/union_top_level.q.out
index 0cf6732f68..a2d0c955af 100644
--- a/ql/src/test/results/clientpositive/llap/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/llap/union_top_level.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -76,7 +76,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -420,7 +420,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -442,7 +442,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -464,7 +464,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -655,7 +655,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -676,7 +676,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -697,7 +697,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -932,7 +932,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -953,7 +953,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
@@ -974,7 +974,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
 Filter Operator
-predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean)
+predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 21750 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
index d77684b984..de4dd0fc27 100644
--- a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
@@ -673,7 +673,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
 Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
 File Output Operator
@@ -834,7 +834,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
 Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
index f4f87ae0ee..6093bebdfb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
@@ -56,7 +56,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColumnInList(col 3:date, values [-67, -171])
-predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
+predicate: (cdate) IN (DATE'1969-10-26', DATE'1969-07-14') (type: boolean)
 Statistics: Num rows: 10 Data size: 532 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: cdate (type: date)
@@ -151,7 +151,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: SelectColumnIsFalse(col 5:boolean)(children: LongColumnInList(col 3, values [-67, -171, 20]) -> 5:boolean)
-predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean)
+predicate: (not (cdate) IN (DATE'1969-10-26', DATE'1969-07-14', DATE'1970-01-21')) (type: boolean)
 Statistics: Num rows: 12274 Data size: 653057 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 Select Vectorization:
@@ -464,7 +464,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColumnBetween(col 3:date, left -2, right 1)
-predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
+predicate: cdate BETWEEN DATE'1969-12-30' AND DATE'1970-01-02' (type: boolean)
 Statistics: Num rows: 1365 Data size: 72627 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: cdate (type: date)
@@ -559,7 +559,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColumnNotBetween(col 3:date, left -610, right 608)
-predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean)
+predicate: cdate NOT BETWEEN DATE'1968-05-01' AND DATE'1971-09-01' (type: boolean)
 Statistics: Num rows: 10924 Data size: 581228 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: cdate (type: date)
@@ -1100,7 +1100,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
-expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
+expressions: (cdate) IN (DATE'1969-10-26', DATE'1969-07-14') (type: boolean)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -1376,13 +1376,13 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
-expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
+expressions: cdate BETWEEN DATE'1969-12-30' AND DATE'1970-01-02' (type: boolean)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [5]
-selectExpressions: VectorUDFAdaptor(cdate BETWEEN 1969-12-30 AND 1970-01-02) -> 5:boolean
+selectExpressions: VectorUDFAdaptor(cdate BETWEEN DATE'1969-12-30' AND DATE'1970-01-02') -> 5:boolean
 Statistics: Num rows: 12289 Data size: 653856 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
index 22b772279b..4cb6213b89 100644
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -142,7 +142,7 @@ STAGE PLANS:
 projectedOutputColumnNums: [2]
 Statistics: Num rows: 1049 Data size: 4196 Basic stats: COMPLETE Column stats: COMPLETE
 Group By Operator
-aggregations: avg(50), avg(50.0), avg(50)
+aggregations: avg(50), avg(50.0D), avg(50)
 Group By Vectorization:
 aggregators: VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 12:int) -> struct, VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 13:double) -> struct, VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 14:decimal(10,0)) -> struct
 className: VectorGroupByOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
index a53ed39db2..e8bb722080 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
@@ -129,7 +129,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0), 2) (type: double)
+expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
@@ -354,7 +354,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0), 2) (type: double)
+expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_date_1.q.out b/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
index bacd667498..2dbc29bcee 100644
--- a/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
@@ -445,7 +445,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:dt1:date, 1:dt2:date, 2:ROW__ID:struct]
 Select Operator
-expressions: dt1 (type: date), (dt1 <> 1970-01-01) (type: boolean), (dt1 >= 1970-01-01) (type: boolean), (dt1 > 1970-01-01) (type: boolean), (dt1 <= 2100-01-01) (type: boolean), (dt1 < 2100-01-01) (type: boolean), (1970-01-01 <> dt1) (type: boolean), (1970-01-01 <= dt1) (type: boolean), (1970-01-01 < dt1) (type: boolean)
+expressions: dt1 (type: date), (dt1 <> DATE'1970-01-01') (type: boolean), (dt1 >= DATE'1970-01-01') (type: boolean), (dt1 > DATE'1970-01-01') (type: boolean), (dt1 <= DATE'2100-01-01') (type: boolean), (dt1 < DATE'2100-01-01') (type: boolean), (DATE'1970-01-01' <> dt1) (type: boolean), (DATE'1970-01-01' <= dt1) (type: boolean), (DATE'1970-01-01' < dt1) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Select Vectorization:
 className: VectorSelectOperator
@@ -610,7 +610,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:dt1:date, 1:dt2:date, 2:ROW__ID:struct]
 Select Operator
-expressions: dt1 (type: date), (dt1 = 1970-01-01) (type: boolean), (dt1 <= 1970-01-01) (type: boolean), (dt1 < 1970-01-01) (type: boolean), (dt1 >= 2100-01-01) (type: boolean), (dt1 > 2100-01-01) (type: boolean), (1970-01-01 = dt1) (type: boolean), (1970-01-01 >= dt1) (type: boolean), (1970-01-01 > dt1) (type: boolean)
+expressions: dt1 (type: date), (dt1 = DATE'1970-01-01') (type: boolean), (dt1 <= DATE'1970-01-01') (type: boolean), (dt1 < DATE'1970-01-01') (type: boolean), (dt1 >= DATE'2100-01-01') (type: boolean), (dt1 > DATE'2100-01-01') (type: boolean), (DATE'1970-01-01' = dt1) (type: boolean), (DATE'1970-01-01' >= dt1) (type: boolean), (DATE'1970-01-01' > dt1) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Select Vectorization:
 className: VectorSelectOperator
@@ -944,10 +944,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterDateScalarEqualDateColumn(val 11323, col 0:date), FilterDateColNotEqualDateScalar(col 0:date, val 0), FilterDateScalarNotEqualDateColumn(val 0, col 0:date))
-predicate: ((1970-01-01 <> dt1) and (2001-01-01 = dt1) and (dt1 <> 1970-01-01)) (type: boolean)
+predicate: ((DATE'1970-01-01' <> dt1) and (DATE'2001-01-01' = dt1) and (dt1 <> DATE'1970-01-01')) (type: boolean)
 Statistics: Num rows: 3 Data size: 336 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: 2001-01-01 (type: date), dt2 (type: date)
+expressions: DATE'2001-01-01' (type: date), dt2 (type: date)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
@@ -1056,7 +1056,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColumnInList(col 0:date, values [0, 11323])
-predicate: (dt1) IN (1970-01-01, 2001-01-01) (type: boolean)
+predicate: (dt1) IN (DATE'1970-01-01', DATE'2001-01-01') (type: boolean)
 Statistics: Num rows: 3 Data size: 168 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: dt1 (type: date)
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
index 5cb3e6678f..af47b072dc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
@@ -158,7 +158,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColGreaterLongScalar(col 9:bigint, val 1)
-predicate: (_col9 > 1) (type: boolean)
+predicate: (_col9 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 443651 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14))
@@ -317,7 +317,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
 Statistics: Num rows: 6144 Data size: 1330955 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
-predicate: (_col15 > 1) (type: boolean)
+predicate: (_col15 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 443651 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: decimal(24,14)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: decimal(27,18)), _col13 (type: double), _col14 (type: double)
@@ -524,7 +524,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColGreaterLongScalar(col 9:bigint, val 1)
-predicate: (_col9 > 1) (type: boolean)
+predicate: (_col9 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 443651 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 (type: decimal(26,0))
@@ -703,7 +703,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
 Statistics: Num rows: 6144 Data size: 1330955 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
-predicate: (_col15 > 1) (type: boolean)
+predicate: (_col15 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 443651 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 (type: decimal(15,9)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(16,0)), _col10 (type: decimal(16,0)), _col11 (type: decimal(26,0)), _col12 (type: decimal(20,4)), _col13 (type: double), _col14 (type: double)
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
index e9023a47d2..3d9d6f127c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
@@ -124,10 +124,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 7:double, val -1.0)(children: FuncSinDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(20,10)) -> 6:double) -> 7:double))
-predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean)
+predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0D)) (type: boolean)
 Statistics: Num rows: 2048 Data size: 233500 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(20,10)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double)
+expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(20,10)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159D)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25
 Select Vectorization:
 className: VectorSelectOperator
@@ -370,10 +370,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 7:double, val -1.0)(children: FuncSinDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 6:double) -> 7:double))
-predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean)
+predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0D)) (type: boolean)
 Statistics: Num rows: 2048 Data size: 233500 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: cdecimal1 (type: decimal(12,4)), round(cdecimal1, 2) (type: decimal(11,2)), round(cdecimal1) (type: decimal(9,0)), floor(cdecimal1) (type: decimal(9,0)), ceil(cdecimal1) (type: decimal(9,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(12,4)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(12,4)), (- cdecimal1) (type: decimal(12,4)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double)
+expressions: cdecimal1 (type: decimal(12,4)), round(cdecimal1, 2) (type: decimal(11,2)), round(cdecimal1) (type: decimal(9,0)), floor(cdecimal1) (type: decimal(9,0)), ceil(cdecimal1) (type: decimal(9,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(12,4)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(12,4)), (- cdecimal1) (type: decimal(12,4)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159D)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
index f96c7697bd..e6d4ff0423 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
@@ -320,7 +320,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -437,7 +437,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) + 1.0) (type: double)
+expressions: (UDFToDouble(key) + 1.0D) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -788,7 +788,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -905,7 +905,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) - 1.0) (type: double)
+expressions: (UDFToDouble(key) - 1.0D) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -1363,7 +1363,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -1480,7 +1480,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) * 2.0) (type: double)
+expressions: (UDFToDouble(key) * 2.0D) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -1949,7 +1949,7 @@ STAGE PLANS:
 predicate: (value <> 0) (type: boolean)
 Statistics: Num rows: 39 Data size: 4412 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -2051,7 +2051,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(20,10), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (1.0 + (UDFToDouble(key) / 2.0)) (type: double)
+expressions: (1.0D + (UDFToDouble(key) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -4241,7 +4241,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -4359,7 +4359,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) + 1.0) (type: double)
+expressions: (UDFToDouble(key) + 1.0D) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -4713,7 +4713,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -4831,7 +4831,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) - 1.0) (type: double)
+expressions: (UDFToDouble(key) - 1.0D) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -5293,7 +5293,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -5411,7 +5411,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (UDFToDouble(key) * 2.0) (type: double)
+expressions: (UDFToDouble(key) * 2.0D) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -5884,7 +5884,7 @@ STAGE PLANS:
 predicate: (value <> 0) (type: boolean)
 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0)) (type: double)
+expressions: (UDFToDouble(key) / (UDFToDouble(value) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
@@ -5987,7 +5987,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct]
 Select Operator
-expressions: (1.0 + (UDFToDouble(key) / 2.0)) (type: double)
+expressions: (1.0D + (UDFToDouble(key) / 2.0D)) (type: double)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
index 75782f8062..30cb55e505 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
@@ -95,7 +95,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: null (type: double), null (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
+expressions: null (type: double), null (type: double), 1.4711276743037347D (type: double), -0.8390715290764524D (type: double), -0.5440211108893698D (type: double), 0.6483608274590866D (type: double), 0.17453292519943295D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -204,7 +204,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
-expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
 Select Vectorization:
 className: VectorSelectOperator
@@ -319,7 +319,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: null (type: double), null (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
+expressions: null (type: double), null (type: double), 1.4711276743037347D (type: double), -0.8390715290764524D (type: double), -0.5440211108893698D (type: double), 0.6483608274590866D (type: double), 0.17453292519943295D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -429,7 +429,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_elt.q.out b/ql/src/test/results/clientpositive/llap/vector_elt.q.out
index 24a1a65022..98db7da733 100644
--- a/ql/src/test/results/clientpositive/llap/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_elt.q.out
@@ -31,7 +31,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColGreaterLongScalar(col 0:tinyint, val 0)
-predicate: (ctinyint > 0) (type: boolean)
+predicate: (ctinyint > 0Y) (type: boolean)
 Statistics: Num rows: 4096 Data size: 312018 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string)
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
index 04ac09b0a6..f1ed1461ee 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
@@ -47,7 +47,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
-keys: key (type: string), val (type: string), 0 (type: bigint)
+keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
@@ -120,7 +120,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
-keys: key (type: string), val (type: string), 0 (type: bigint)
+keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
@@ -219,7 +219,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
@@ -317,7 +317,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(DISTINCT val)
-keys: key (type: string), 0 (type: bigint), val (type: string)
+keys: key (type: string), 0L (type: bigint), val (type: string)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
@@ -404,7 +404,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
-keys: key (type: string), val (type: string), 0 (type: bigint)
+keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
@@ -519,7 +519,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(DISTINCT val)
-keys: key (type: string), 0 (type: bigint), val (type: string)
+keys: key (type: string), 0L (type: bigint), val (type: string)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
@@ -649,7 +649,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(1)
-keys: key (type: string), val (type: string), 0 (type: bigint)
+keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
@@ -665,7 +665,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: sum(1)
-keys: key (type: string), val (type: string), 0 (type: bigint)
+keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
index fd9dacbb12..e57a0da2dd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -233,7 +233,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -392,7 +392,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
@@ -545,7 +545,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
@@ -698,7 +698,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -759,7 +759,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), CASE WHEN ((_col2 = 0)) THEN ('0') WHEN ((_col2 = 1)) THEN ('1') WHEN ((_col2 = 2)) THEN ('2') WHEN ((_col2 = 3)) THEN ('3') ELSE ('nothing') END (type: string)
+expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), CASE WHEN ((_col2 = 0L)) THEN ('0') WHEN ((_col2 = 1L)) THEN ('1') WHEN ((_col2 = 2L)) THEN ('2') WHEN ((_col2 = 3L)) THEN ('3') ELSE ('nothing') END (type: string)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
@@ -858,7 +858,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -919,7 +919,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), CASE WHEN ((_col2 = 0)) THEN ('0') WHEN ((_col2 = 1)) THEN ('1') WHEN ((_col2 = 2)) THEN ('2') WHEN ((_col2 = 3)) THEN ('3') ELSE ('nothing') END (type: string)
+expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), CASE WHEN ((_col2 = 0L)) THEN ('0') WHEN ((_col2 = 1L)) THEN ('1') WHEN ((_col2 = 2L)) THEN ('2') WHEN ((_col2 = 3L)) THEN ('3') ELSE ('nothing') END (type: string)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
index 74f6289fb0..058006c545 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
@@ -77,7 +77,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -279,7 +279,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -491,7 +491,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -798,7 +798,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -1101,7 +1101,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -1429,7 +1429,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -1750,7 +1750,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -1916,7 +1916,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
@@ -2134,7 +2134,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
index da4ba84c46..4fc9b8abd9 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: key (type: int), value (type: int), 0 (type: bigint)
+keys: key (type: int), value (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE
@@ -93,12 +93,12 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColEqualLongScalar(col 2:bigint, val 1)
-predicate: (_col2 = 1) (type: boolean)
+predicate: (_col2 = 1L) (type: boolean)
 Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: NONE
 Reduce Output Operator
-key expressions: _col0 (type: int), _col1 (type: int), 1 (type: bigint)
+key expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint)
 sort order: +++
-Map-reduce partition columns: _col0 (type: int), _col1 (type: int), 1 (type: bigint)
+Map-reduce partition columns: _col0 (type: int), _col1 (type: int), 1L (type: bigint)
 Reduce Sink Vectorization:
 className: VectorReduceSinkMultiKeyOperator
 keyColumnNums: [0, 1, 4]
@@ -151,13 +151,13 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: MERGE_PARTIAL
 projectedOutputColumnNums: [0]
-keys: KEY._col0 (type: int), KEY._col1 (type: int), 1 (type: bigint)
+keys: KEY._col0 (type: int), KEY._col1 (type: int), 1L (type: bigint)
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col3
 Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 pruneGroupingSetId: true
 Select Operator
-expressions: _col0 (type: int), _col1 (type: int), 1 (type: bigint), _col3 (type: bigint)
+expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint), _col3 (type: bigint)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
@@ -260,7 +260,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint)
+keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE
@@ -328,10 +328,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColEqualLongScalar(col 2:bigint, val 1)
-predicate: (_col2 = 1) (type: boolean)
+predicate: (_col2 = 1L) (type: boolean)
 Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-expressions: _col0 (type: int), _col1 (type: int), 1 (type: bigint), _col3 (type: bigint)
+expressions: _col0 (type: int), _col1 (type: int), 1L (type: bigint), _col3 (type: bigint)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
index 137e06a5b5..a4820d9b75 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
@@ -92,7 +92,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: a (type: string), b (type: string), 0 (type: bigint)
+keys: a (type: string), b (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -254,7 +254,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: a (type: string), b (type: string), 0 (type: bigint)
+keys: a (type: string), b (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -416,7 +416,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: a (type: string), b (type: string), 0 (type: bigint)
+keys: a (type: string), b (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE
@@ -578,7 +578,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: [0]
-keys: a (type: string), b (type: string), 0 (type: bigint)
+keys: a (type: string), b (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
@@ -734,7 +734,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
-keys: a (type: string), b (type: string), c (type: string), 0 (type: bigint)
+keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 18 Data size: 9936 Basic stats: COMPLETE Column stats: NONE
a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out index d8da29ac36..c522822da8 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out @@ -137,7 +137,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE @@ -318,7 +318,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE @@ -493,7 +493,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 24 Data size: 13248 Basic stats: COMPLETE Column stats: NONE @@ -722,7 +722,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3.q.out index 79116112da..80ecd59a16 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3.q.out @@ -70,7 +70,7 @@ STAGE PLANS: Statistics: Num rows: 12 Data size: 6624 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(c), count() - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 48 Data size: 26496 Basic stats: COMPLETE Column stats: NONE @@ -178,7 +178,7 @@ STAGE PLANS: Statistics: Num rows: 12 Data size: 6624 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(c), count() - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 48 Data size: 26496 Basic stats: COMPLETE Column stats: NONE @@ -356,7 +356,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0, 1] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3, 
_col4 Statistics: Num rows: 48 Data size: 26496 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out index 8d2247d42e..b7f0825cb6 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out @@ -85,7 +85,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0, 1] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 48 Data size: 23040 Basic stats: COMPLETE Column stats: NONE @@ -223,7 +223,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0, 1] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 48 Data size: 23040 Basic stats: COMPLETE Column stats: NONE @@ -446,7 +446,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0, 1] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 48 Data size: 23040 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out index 55c18fa2f1..2463f2d13b 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out @@ -74,7 +74,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterDoubleColLessDoubleScalar(col 4:double, val 3.0)(children: CastStringToDouble(col 0:string) -> 4:double) - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -86,7 +86,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 8 Data size: 2944 Basic stats: COMPLETE Column stats: NONE @@ -318,7 +318,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterDoubleColLessDoubleScalar(col 4:double, val 3.0)(children: CastStringToDouble(col 0:string) -> 4:double) - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -330,7 +330,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 8 
Data size: 2944 Basic stats: COMPLETE Column stats: NONE @@ -593,7 +593,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterDoubleColLessDoubleScalar(col 4:double, val 3.0)(children: CastStringToDouble(col 0:string) -> 4:double) - predicate: (UDFToDouble(a) < 3.0) (type: boolean) + predicate: (UDFToDouble(a) < 3.0D) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -664,7 +664,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 8 Data size: 2944 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out index 4e579888e6..78a8e5b968 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out @@ -148,7 +148,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -340,7 +340,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -601,7 +601,7 @@ STAGE PLANS: native: false vectorProcessingMode: STREAMING projectedOutputColumnNums: [0] - keys: KEY._col0 (type: string), KEY._col1 (type: string), 0 (type: bigint) + keys: KEY._col0 (type: string), KEY._col1 (type: string), 0L (type: bigint) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out index 83c7ad8f00..1132a4f619 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out @@ -68,7 +68,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterDoubleColEqualDoubleScalar(col 4:double, val 5.0)(children: CastStringToDouble(col 0:string) -> 4:double) - predicate: (UDFToDouble(a) = 5.0) (type: boolean) + predicate: (UDFToDouble(a) = 5.0D) (type: boolean) Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE Group By Operator Group By Vectorization: @@ -78,7 +78,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 
2208 Basic stats: COMPLETE Column stats: NONE @@ -211,7 +211,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterDoubleColEqualDoubleScalar(col 4:double, val 5.0)(children: CastStringToDouble(col 0:string) -> 4:double) - predicate: (UDFToDouble(a) = 5.0) (type: boolean) + predicate: (UDFToDouble(a) = 5.0D) (type: boolean) Statistics: Num rows: 3 Data size: 1104 Basic stats: COMPLETE Column stats: NONE Group By Operator Group By Vectorization: @@ -221,7 +221,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out index 8dd5cf074c..0d147a181b 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out @@ -78,7 +78,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE @@ -239,7 +239,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -407,7 +407,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -576,7 +576,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: _col0 (type: int), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: int), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -782,7 +782,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE @@ -843,13 +843,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1) (type: bigint), grouping(_col2, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, 
_col4 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4] - selectExpressions: VectorUDFAdaptor(grouping(_col2, 1)) -> 3:bigint, VectorUDFAdaptor(grouping(_col2, 0)) -> 4:bigint + selectExpressions: VectorUDFAdaptor(grouping(_col2, 1L)) -> 3:bigint, VectorUDFAdaptor(grouping(_col2, 0L)) -> 4:bigint Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -943,7 +943,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -1004,13 +1004,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1) (type: bigint), grouping(_col2, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L) (type: bigint), grouping(_col2, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4] - selectExpressions: VectorUDFAdaptor(grouping(_col2, 1)) -> 3:bigint, VectorUDFAdaptor(grouping(_col2, 0)) -> 4:bigint + selectExpressions: VectorUDFAdaptor(grouping(_col2, 1L)) -> 3:bigint, VectorUDFAdaptor(grouping(_col2, 0L)) -> 4:bigint Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1111,7 +1111,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -1119,8 +1119,8 @@ STAGE PLANS: Filter Vectorization: className: VectorFilterOperator native: true - predicateExpression: FilterLongColEqualLongScalar(col 3:bigint, val 1)(children: VectorUDFAdaptor(grouping(_col2, 1)) -> 3:bigint) - predicate: (grouping(_col2, 1) = 1) (type: boolean) + predicateExpression: FilterLongColEqualLongScalar(col 3:bigint, val 1)(children: VectorUDFAdaptor(grouping(_col2, 1L)) -> 3:bigint) + predicate: (grouping(_col2, 1L) = 1) (type: boolean) Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) @@ -1273,7 +1273,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -1281,8 +1281,8 @@ STAGE PLANS: Filter Vectorization: className: VectorFilterOperator native: true - predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 3:bigint, val 1)(children: VectorUDFAdaptor(grouping(_col2, 1)) -> 3:bigint), FilterLongColEqualLongScalar(col 3:bigint, val 1)(children: VectorUDFAdaptor(grouping(_col2, 
0)) -> 3:bigint)) - predicate: ((grouping(_col2, 0) = 1) or (grouping(_col2, 1) = 1)) (type: boolean) + predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 3:bigint, val 1)(children: VectorUDFAdaptor(grouping(_col2, 1L)) -> 3:bigint), FilterLongColEqualLongScalar(col 3:bigint, val 1)(children: VectorUDFAdaptor(grouping(_col2, 0L)) -> 3:bigint)) + predicate: ((grouping(_col2, 0L) = 1) or (grouping(_col2, 1L) = 1)) (type: boolean) Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) @@ -1341,16 +1341,16 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1) + grouping(_col2, 0)) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), (grouping(_col2, 1L) + grouping(_col2, 0L)) (type: bigint) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 5] - selectExpressions: LongColAddLongColumn(col 3:bigint, col 4:bigint)(children: VectorUDFAdaptor(grouping(_col2, 1)) -> 3:bigint, VectorUDFAdaptor(grouping(_col2, 0)) -> 4:bigint) -> 5:bigint + selectExpressions: LongColAddLongColumn(col 3:bigint, col 4:bigint)(children: VectorUDFAdaptor(grouping(_col2, 1L)) -> 3:bigint, VectorUDFAdaptor(grouping(_col2, 0L)) -> 4:bigint) -> 5:bigint Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col2 (type: bigint), CASE WHEN ((_col2 = 1)) THEN (_col0) END (type: int) + key expressions: _col2 (type: bigint), CASE WHEN ((_col2 = 1L)) THEN (_col0) END (type: int) sort order: -+ Reduce Sink Vectorization: className: VectorReduceSinkObjectHashOperator @@ -1480,7 +1480,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -1541,13 +1541,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3] - selectExpressions: VectorUDFAdaptor(grouping(_col2, 1, 0)) -> 3:bigint + selectExpressions: VectorUDFAdaptor(grouping(_col2, 1L, 0L)) -> 3:bigint Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1646,7 +1646,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 24 Data size: 192 Basic stats: COMPLETE Column stats: NONE @@ -1707,13 +1707,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, 
_col2 Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0, 1) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0L, 1L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3] - selectExpressions: VectorUDFAdaptor(grouping(_col2, 0, 1)) -> 3:bigint + selectExpressions: VectorUDFAdaptor(grouping(_col2, 0L, 1L)) -> 3:bigint Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1812,7 +1812,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE @@ -1873,13 +1873,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1, 0) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 1L, 0L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3] - selectExpressions: VectorUDFAdaptor(grouping(_col2, 1, 0)) -> 3:bigint + selectExpressions: VectorUDFAdaptor(grouping(_col2, 1L, 0L)) -> 3:bigint Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1973,7 +1973,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: key (type: int), value (type: int), 0 (type: bigint) + keys: key (type: int), value (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 18 Data size: 144 Basic stats: COMPLETE Column stats: NONE @@ -2034,13 +2034,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0, 1) (type: bigint) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), grouping(_col2, 0L, 1L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3] - selectExpressions: VectorUDFAdaptor(grouping(_col2, 0, 1)) -> 3:bigint + selectExpressions: VectorUDFAdaptor(grouping(_col2, 0L, 1L)) -> 3:bigint Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out index f7f3014a7f..030734d059 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out @@ -78,7 +78,7 @@ STAGE PLANS: native: false vectorProcessingMode: 
HASH projectedOutputColumnNums: [0] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE @@ -279,7 +279,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 24 Data size: 8832 Basic stats: COMPLETE Column stats: NONE @@ -480,7 +480,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: a (type: string), b (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -679,7 +679,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: a (type: string), b (type: string), c (type: string), 0 (type: bigint) + keys: a (type: string), b (type: string), c (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 9936 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out index 9374bcc27a..a3fbde499b 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out @@ -76,7 +76,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0, 1] - keys: category (type: int), 0 (type: bigint) + keys: category (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 60 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out index 82b6ba0a09..98e6e54f25 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out @@ -157,7 +157,7 @@ STAGE PLANS: 1 Reducer 6 Statistics: Num rows: 500 Data size: 98620 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 500 Data size: 98620 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out index 717c218da9..8a98ce6314 100644 --- a/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out @@ -74,7 +74,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: 
string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE @@ -231,7 +231,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: key (type: string), 0 (type: bigint), val (type: string) + keys: key (type: string), 0L (type: bigint), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -358,7 +358,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE @@ -560,7 +560,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: key (type: string), 0 (type: bigint), val (type: string) + keys: key (type: string), 0L (type: bigint), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -733,7 +733,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE @@ -768,7 +768,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [0] - keys: key (type: string), val (type: string), 0 (type: bigint) + keys: key (type: string), val (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out index 172e003af6..ec3e2b8631 100644 --- a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out @@ -172,7 +172,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: s_store_id (type: string), 0 (type: bigint) + keys: s_store_id (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 24 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -303,7 +303,7 @@ STAGE PLANS: native: false vectorProcessingMode: HASH projectedOutputColumnNums: [] - keys: _col0 (type: string), 0 (type: bigint) + keys: _col0 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 24 Data size: 4416 Basic stats: COMPLETE Column stats: NONE @@ -424,7 +424,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 12 Data size: 2208 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: _col0 (type: string), 0 (type: bigint) + keys: _col0 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 24 Data size: 4416 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out 
index 7d891db4ef..dcedca8220 100644 --- a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out @@ -128,7 +128,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: string), 1-2 (type: interval_year_month), VALUE._col0 (type: interval_year_month), 1 02:03:04.000000000 (type: interval_day_time), VALUE._col1 (type: interval_day_time) + expressions: KEY.reducesinkkey0 (type: string), INTERVAL'1-2' (type: interval_year_month), VALUE._col0 (type: interval_year_month), INTERVAL'1 02:03:04.000000000' (type: interval_day_time), VALUE._col1 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Select Vectorization: className: VectorSelectOperator @@ -219,7 +219,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month) + expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (INTERVAL'1-2' + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (INTERVAL'1-2' - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Select Vectorization: className: VectorSelectOperator @@ -257,7 +257,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: date), 2-4 (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), 0-0 (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month) + expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2-4' (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), INTERVAL'0-0' (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Select Vectorization: className: VectorSelectOperator @@ -356,7 +356,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: dt (type: date), (CAST( str2 AS INTERVAL DAY TO SECOND) + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (1 02:03:04.000000000 + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (CAST( str2 AS INTERVAL DAY TO SECOND) - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (1 02:03:04.000000000 - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time) + expressions: dt (type: date), (CAST( str2 AS INTERVAL DAY TO SECOND) + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (INTERVAL'1 02:03:04.000000000' + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (CAST( str2 AS INTERVAL DAY TO SECOND) - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (INTERVAL'1 02:03:04.000000000' - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: 
interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Select Vectorization: className: VectorSelectOperator @@ -394,7 +394,7 @@ STAGE PLANS: vectorized: true Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: date), 2 04:06:08.000000000 (type: interval_day_time), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), 0 00:00:00.000000000 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time) + expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2 04:06:08.000000000' (type: interval_day_time), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), INTERVAL'0 00:00:00.000000000' (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Select Vectorization: className: VectorSelectOperator @@ -505,7 +505,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: dt (type: date), (dt + 1-2) (type: date), (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (1-2 + dt) (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + dt) (type: date), (dt - 1-2) (type: date), (dt - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (dt + 1 02:03:04.000000000) (type: timestamp), (dt + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (1 02:03:04.000000000 + dt) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + dt) (type: timestamp), (dt - 1 02:03:04.000000000) (type: timestamp), (dt - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp) + expressions: dt (type: date), (dt + INTERVAL'1-2') (type: date), (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (INTERVAL'1-2' + dt) (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + dt) (type: date), (dt - INTERVAL'1-2') (type: date), (dt - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (dt + INTERVAL'1 02:03:04.000000000') (type: timestamp), (dt + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (INTERVAL'1 02:03:04.000000000' + dt) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + dt) (type: timestamp), (dt - INTERVAL'1 02:03:04.000000000') (type: timestamp), (dt - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Select Vectorization: className: VectorSelectOperator @@ -665,7 +665,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: ts (type: timestamp), (ts + 1-2) (type: timestamp), (ts + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (1-2 + ts) (type: timestamp), (CAST( str1 AS INTERVAL YEAR TO MONTH) + ts) (type: timestamp), (ts - 1-2) (type: timestamp), (ts - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (ts + 1 02:03:04.000000000) (type: timestamp), (ts + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (1 02:03:04.000000000 + ts) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + ts) (type: timestamp), (ts - 1 02:03:04.000000000) (type: timestamp), (ts - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp) + expressions: ts (type: timestamp), (ts + INTERVAL'1-2') (type: timestamp), (ts + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (INTERVAL'1-2' + ts) (type: timestamp), (CAST( str1 AS INTERVAL YEAR TO MONTH) + ts) (type: timestamp), (ts - INTERVAL'1-2') (type: timestamp), (ts - CAST( str1 AS INTERVAL YEAR 
TO MONTH)) (type: timestamp), (ts + INTERVAL'1 02:03:04.000000000') (type: timestamp), (ts + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (INTERVAL'1 02:03:04.000000000' + ts) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + ts) (type: timestamp), (ts - INTERVAL'1 02:03:04.000000000') (type: timestamp), (ts - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Select Vectorization: className: VectorSelectOperator @@ -807,7 +807,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: ts (type: timestamp), (ts - ts) (type: interval_day_time), (2001-01-01 01:02:03.0 - ts) (type: interval_day_time), (ts - 2001-01-01 01:02:03.0) (type: interval_day_time) + expressions: ts (type: timestamp), (ts - ts) (type: interval_day_time), (TIMESTAMP'2001-01-01 01:02:03.0' - ts) (type: interval_day_time), (ts - TIMESTAMP'2001-01-01 01:02:03.0') (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator @@ -931,7 +931,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: dt (type: date), (dt - dt) (type: interval_day_time), (2001-01-01 - dt) (type: interval_day_time), (dt - 2001-01-01) (type: interval_day_time) + expressions: dt (type: date), (dt - dt) (type: interval_day_time), (DATE'2001-01-01' - dt) (type: interval_day_time), (dt - DATE'2001-01-01') (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator @@ -1061,7 +1061,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: dt (type: date), (ts - dt) (type: interval_day_time), (2001-01-01 01:02:03.0 - dt) (type: interval_day_time), (ts - 2001-01-01) (type: interval_day_time), (dt - ts) (type: interval_day_time), (dt - 2001-01-01 01:02:03.0) (type: interval_day_time), (2001-01-01 - ts) (type: interval_day_time) + expressions: dt (type: date), (ts - dt) (type: interval_day_time), (TIMESTAMP'2001-01-01 01:02:03.0' - dt) (type: interval_day_time), (ts - DATE'2001-01-01') (type: interval_day_time), (dt - ts) (type: interval_day_time), (dt - TIMESTAMP'2001-01-01 01:02:03.0') (type: interval_day_time), (DATE'2001-01-01' - ts) (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out index 7548686c50..2ee7502687 100644 --- a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out @@ -130,7 +130,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: str1 (type: string), (CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS 
INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= 1-2) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3) (type: boolean), (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean) + expressions: str1 (type: string), (CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) = INTERVAL'1-2') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= INTERVAL'1-2') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <= INTERVAL'1-3') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-3') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) > INTERVAL'1-2') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> INTERVAL'1-3') (type: boolean), (INTERVAL'1-2' = CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' <= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' < CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-3' >= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-3' > CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' <> CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24 Select Vectorization: className: VectorSelectOperator @@ -336,7 +336,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: 
str1 (type: string), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) > CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) < CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-2) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= 1-3) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) > 1-3) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) <= 1-2) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) < 1-2) (type: boolean), (1-2 <> CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 >= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-2 > CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (1-3 < CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean) + expressions: str1 (type: string), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) > CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) < CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) <> INTERVAL'1-2') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-3') (type: boolean), (CAST( str1 AS INTERVAL YEAR TO MONTH) > INTERVAL'1-3') (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) <= INTERVAL'1-2') (type: boolean), (CAST( str2 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-2') (type: boolean), (INTERVAL'1-2' <> CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' >= CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-2' > CAST( str2 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-3' <= CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean), (INTERVAL'1-3' < CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col7, _col8, _col9, _col10, _col11, _col13, _col14, _col15, _col16, _col17 Select Vectorization: className: VectorSelectOperator @@ -542,7 +542,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: str3 (type: string), (CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 
02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000) (type: boolean), (1 02:03:04.000000000 = CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean) + expressions: str3 (type: string), (CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) = INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <= INTERVAL'1 02:03:05.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) < INTERVAL'1 02:03:05.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) >= INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) > INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> INTERVAL'1 02:03:05.000000000') (type: boolean), (INTERVAL'1 02:03:04.000000000' = CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' <= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' < CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:05.000000000' >= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:05.000000000' > CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' <> CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean) outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
 Select Vectorization:
 className: VectorSelectOperator
@@ -748,7 +748,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: str3 (type: string), (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) > CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) < CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:04.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= 1 02:03:05.000000000) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) > 1 02:03:05.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) <= 1 02:03:04.000000000) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) < 1 02:03:04.000000000) (type: boolean), (1 02:03:04.000000000 <> CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 >= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:04.000000000 > CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (1 02:03:05.000000000 < CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean)
+ expressions: str3 (type: string), (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) > CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) < CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) <> INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) >= INTERVAL'1 02:03:05.000000000') (type: boolean), (CAST( str3 AS INTERVAL DAY TO SECOND) > INTERVAL'1 02:03:05.000000000') (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) <= INTERVAL'1 02:03:04.000000000') (type: boolean), (CAST( str4 AS INTERVAL DAY TO SECOND) < INTERVAL'1 02:03:04.000000000') (type: boolean), (INTERVAL'1 02:03:04.000000000' <> CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' >= CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:04.000000000' > CAST( str4 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:05.000000000' <= CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean), (INTERVAL'1 02:03:05.000000000' < CAST( str3 AS INTERVAL DAY TO SECOND)) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col7, _col8, _col9, _col10, _col11, _col13, _col14, _col15, _col16, _col17
 Select Vectorization:
 className: VectorSelectOperator
@@ -944,7 +944,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongColumn(col 7:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month), FilterLongColNotEqualLongColumn(col 7:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month, CastStringToIntervalYearMonth(col 3:string) -> 8:interval_year_month), FilterLongColLessEqualLongColumn(col 7:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month, CastStringToIntervalYearMonth(col 3:string) -> 8:interval_year_month), FilterLongColLessLongColumn(col 7:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month, CastStringToIntervalYearMonth(col 3:string) -> 8:interval_year_month), FilterLongColGreaterEqualLongColumn(col 7:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month), FilterLongColGreaterLongColumn(col 7:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month), FilterIntervalYearMonthColEqualIntervalYearMonthScalar(col 7:interval_year_month, val 14)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month), FilterIntervalYearMonthColNotEqualIntervalYearMonthScalar(col 7:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month), FilterIntervalYearMonthColLessEqualIntervalYearMonthScalar(col 7:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month), FilterIntervalYearMonthColLessIntervalYearMonthScalar(col 7:interval_year_month, val 15)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month), FilterIntervalYearMonthColGreaterEqualIntervalYearMonthScalar(col 7:interval_year_month, val 14)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month), FilterIntervalYearMonthColGreaterIntervalYearMonthScalar(col 7:interval_year_month, val 14)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month), FilterIntervalYearMonthScalarEqualIntervalYearMonthColumn(val 14, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month), FilterIntervalYearMonthScalarNotEqualIntervalYearMonthColumn(val 14, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month), FilterIntervalYearMonthScalarLessEqualIntervalYearMonthColumn(val 14, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month), FilterIntervalYearMonthScalarLessIntervalYearMonthColumn(val 14, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 3:string) -> 7:interval_year_month), FilterIntervalYearMonthScalarGreaterEqualIntervalYearMonthColumn(val 15, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month), FilterIntervalYearMonthScalarGreaterIntervalYearMonthColumn(val 15, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month))
- predicate: ((1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
+ predicate: ((CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> INTERVAL'1-3') and (CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = INTERVAL'1-2') and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > INTERVAL'1-2') and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= INTERVAL'1-2') and (INTERVAL'1-2' < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (INTERVAL'1-2' = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (INTERVAL'1-3' > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (INTERVAL'1-3' >= CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
 Statistics: Num rows: 1 Data size: 408 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ts (type: timestamp)
@@ -1138,7 +1138,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterIntervalDayTimeColEqualIntervalDayTimeColumn(col 7:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time, CastStringToIntervalDayTime(col 4:string) -> 8:interval_day_time), FilterIntervalDayTimeColNotEqualIntervalDayTimeColumn(col 7:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time, CastStringToIntervalDayTime(col 5:string) -> 8:interval_day_time), FilterIntervalDayTimeColLessEqualIntervalDayTimeColumn(col 7:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time, CastStringToIntervalDayTime(col 5:string) -> 8:interval_day_time), FilterIntervalDayTimeColLessIntervalDayTimeColumn(col 7:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time, CastStringToIntervalDayTime(col 5:string) -> 8:interval_day_time), FilterIntervalDayTimeColGreaterEqualIntervalDayTimeColumn(col 7:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time, CastStringToIntervalDayTime(col 4:string) -> 8:interval_day_time), FilterIntervalDayTimeColGreaterIntervalDayTimeColumn(col 7:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time, CastStringToIntervalDayTime(col 4:string) -> 8:interval_day_time), FilterIntervalDayTimeColEqualIntervalDayTimeScalar(col 7:interval_day_time, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time), FilterIntervalDayTimeColNotEqualIntervalDayTimeScalar(col 7:interval_day_time, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time), FilterIntervalDayTimeColLessEqualIntervalDayTimeScalar(col 7:interval_day_time, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time), FilterIntervalDayTimeColLessIntervalDayTimeScalar(col 7:interval_day_time, val 1 02:03:05.000000000)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time), FilterIntervalDayTimeColGreaterEqualIntervalDayTimeScalar(col 7:interval_day_time, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time), FilterIntervalDayTimeColGreaterIntervalDayTimeScalar(col 7:interval_day_time, val 1 02:03:04.000000000)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time), FilterIntervalDayTimeScalarEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 7:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time), FilterIntervalDayTimeScalarNotEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 7:interval_day_time)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time), FilterIntervalDayTimeScalarLessEqualIntervalDayTimeColumn(val 1 02:03:04.000000000, col 7:interval_day_time)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time), FilterIntervalDayTimeScalarLessIntervalDayTimeColumn(val 1 02:03:04.000000000, col 7:interval_day_time)(children: CastStringToIntervalDayTime(col 5:string) -> 7:interval_day_time), FilterIntervalDayTimeScalarGreaterEqualIntervalDayTimeColumn(val 1 02:03:05.000000000, col 7:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time), FilterIntervalDayTimeScalarGreaterIntervalDayTimeColumn(val 1 02:03:05.000000000, col 7:interval_day_time)(children: CastStringToIntervalDayTime(col 4:string) -> 7:interval_day_time))
- predicate: ((1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 = CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
+ predicate: ((CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> INTERVAL'1 02:03:05.000000000') and (CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) = INTERVAL'1 02:03:04.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > INTERVAL'1 02:03:04.000000000') and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= INTERVAL'1 02:03:04.000000000') and (INTERVAL'1 02:03:04.000000000' < CAST( str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:04.000000000' = CAST( str3 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:05.000000000' > CAST( str3 AS INTERVAL DAY TO SECOND)) and (INTERVAL'1 02:03:05.000000000' >= CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
 Statistics: Num rows: 1 Data size: 408 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ts (type: timestamp)
@@ -1322,7 +1322,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterDateScalarEqualDateColumn(val 11747, col 8:date)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterDateScalarLessEqualDateColumn(val 11747, col 8:date)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterDateScalarGreaterEqualDateColumn(val 11747, col 8:date)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterDateColEqualDateScalar(col 8:date, val 11747)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterDateColLessEqualDateScalar(col 8:date, val 11747)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterDateColGreaterEqualDateScalar(col 8:date, val 11747)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterLongColNotEqualLongColumn(col 1:date, col 8:date)(children: DateColAddIntervalYearMonthColumn(col 1:date, col 7:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 7:interval_year_month) -> 8:date), FilterDateScalarEqualDateColumn(val 11747, col 7:date)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date), FilterDateScalarLessEqualDateColumn(val 11747, col 7:date)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date), FilterDateScalarGreaterEqualDateColumn(val 11747, col 7:date)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date), FilterDateColEqualDateScalar(col 7:date, val 11747)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date), FilterDateColLessEqualDateScalar(col 7:date, val 11747)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date), FilterDateColGreaterEqualDateScalar(col 7:date, val 11747)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date), FilterLongColNotEqualLongColumn(col 1:date, col 7:date)(children: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 7:date))
- predicate: (((dt + 1-2) <= 2002-03-01) and ((dt + 1-2) = 2002-03-01) and ((dt + 1-2) >= 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= 2002-03-01) and (2002-03-01 <= (dt + 1-2)) and (2002-03-01 <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 = (dt + 1-2)) and (2002-03-01 = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 >= (dt + 1-2)) and (2002-03-01 >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (dt <> (dt + 1-2)) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) (type: boolean)
+ predicate: (((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= DATE'2002-03-01') and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = DATE'2002-03-01') and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= DATE'2002-03-01') and ((dt + INTERVAL'1-2') <= DATE'2002-03-01') and ((dt + INTERVAL'1-2') = DATE'2002-03-01') and ((dt + INTERVAL'1-2') >= DATE'2002-03-01') and (DATE'2002-03-01' <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (DATE'2002-03-01' <= (dt + INTERVAL'1-2')) and (DATE'2002-03-01' = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (DATE'2002-03-01' = (dt + INTERVAL'1-2')) and (DATE'2002-03-01' >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (DATE'2002-03-01' >= (dt + INTERVAL'1-2')) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (dt <> (dt + INTERVAL'1-2'))) (type: boolean)
 Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ts (type: timestamp)
@@ -1506,7 +1506,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterTimestampScalarEqualTimestampColumn(val 2002-03-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampScalarLessEqualTimestampColumn(val 2002-03-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampScalarGreaterEqualTimestampColumn(val 2002-03-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampScalarNotEqualTimestampColumn(val 2002-04-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampScalarLessTimestampColumn(val 2002-02-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampScalarGreaterTimestampColumn(val 2002-04-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColEqualTimestampScalar(col 7:timestamp, val 2002-03-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColGreaterEqualTimestampScalar(col 7:timestamp, val 2002-03-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColLessEqualTimestampScalar(col 7:timestamp, val 2002-03-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColNotEqualTimestampScalar(col 7:timestamp, val 2002-04-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColGreaterTimestampScalar(col 7:timestamp, val 2002-02-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColLessTimestampScalar(col 7:timestamp, val 2002-04-01 01:02:03.0)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 7:timestamp), FilterTimestampColEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 0-0) -> 7:timestamp), FilterTimestampColNotEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-0) -> 7:timestamp), FilterTimestampColLessEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-0) -> 7:timestamp), FilterTimestampColLessTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-0) -> 7:timestamp), FilterTimestampColGreaterEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColSubtractIntervalYearMonthScalar(col 0:timestamp, val 1-0) -> 7:timestamp), FilterTimestampColGreaterTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColSubtractIntervalYearMonthScalar(col 0:timestamp, val 1-0) -> 7:timestamp))
- predicate: (((ts + 1-2) < 2002-04-01 01:02:03.0) and ((ts + 1-2) <= 2002-03-01 01:02:03.0) and ((ts + 1-2) <> 2002-04-01 01:02:03.0) and ((ts + 1-2) = 2002-03-01 01:02:03.0) and ((ts + 1-2) > 2002-02-01 01:02:03.0) and ((ts + 1-2) >= 2002-03-01 01:02:03.0) and (2002-02-01 01:02:03.0 < (ts + 1-2)) and (2002-03-01 01:02:03.0 <= (ts + 1-2)) and (2002-03-01 01:02:03.0 = (ts + 1-2)) and (2002-03-01 01:02:03.0 >= (ts + 1-2)) and (2002-04-01 01:02:03.0 <> (ts + 1-2)) and (2002-04-01 01:02:03.0 > (ts + 1-2)) and (ts < (ts + 1-0)) and (ts <= (ts + 1-0)) and (ts <> (ts + 1-0)) and (ts = (ts + 0-0)) and (ts > (ts - 1-0)) and (ts >= (ts - 1-0))) (type: boolean)
+ predicate: (((ts + INTERVAL'1-2') < TIMESTAMP'2002-04-01 01:02:03.0') and ((ts + INTERVAL'1-2') <= TIMESTAMP'2002-03-01 01:02:03.0') and ((ts + INTERVAL'1-2') <> TIMESTAMP'2002-04-01 01:02:03.0') and ((ts + INTERVAL'1-2') = TIMESTAMP'2002-03-01 01:02:03.0') and ((ts + INTERVAL'1-2') > TIMESTAMP'2002-02-01 01:02:03.0') and ((ts + INTERVAL'1-2') >= TIMESTAMP'2002-03-01 01:02:03.0') and (TIMESTAMP'2002-02-01 01:02:03.0' < (ts + INTERVAL'1-2')) and (TIMESTAMP'2002-03-01 01:02:03.0' <= (ts + INTERVAL'1-2')) and (TIMESTAMP'2002-03-01 01:02:03.0' = (ts + INTERVAL'1-2')) and (TIMESTAMP'2002-03-01 01:02:03.0' >= (ts + INTERVAL'1-2')) and (TIMESTAMP'2002-04-01 01:02:03.0' <> (ts + INTERVAL'1-2')) and (TIMESTAMP'2002-04-01 01:02:03.0' > (ts + INTERVAL'1-2')) and (ts < (ts + INTERVAL'1-0')) and (ts <= (ts + INTERVAL'1-0')) and (ts <> (ts + INTERVAL'1-0')) and (ts = (ts + INTERVAL'0-0')) and (ts > (ts - INTERVAL'1-0')) and (ts >= (ts - INTERVAL'1-0'))) (type: boolean)
 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ts (type: timestamp)
@@ -1700,7 +1700,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterTimestampScalarEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampScalarNotEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampScalarLessEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampScalarGreaterEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampColEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampColNotEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampColGreaterEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampColGreaterTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampColLessEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampColLessTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampColEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampColNotEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampColLessEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampColLessTimestampColumn(col 0:timestamp, col 7:timestamp)(children: DateColAddIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp), FilterTimestampColGreaterEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 01:02:03.000000000) -> 7:timestamp), FilterTimestampColGreaterTimestampColumn(col 0:timestamp, col 7:timestamp)(children: DateColSubtractIntervalDayTimeScalar(col 1:date, val 0 01:02:04.000000000) -> 7:timestamp))
- predicate: (((dt + 0 01:02:03.000000000) = 2001-01-01 01:02:03.0) and ((dt + 0 01:02:03.000000000) >= 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) <> 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) > 2001-01-01 01:02:03.0) and ((dt - 0 01:02:03.000000000) <= 2001-01-01 01:02:03.0) and ((dt - 0 01:02:04.000000000) < 2001-01-01 01:02:03.0) and (2001-01-01 01:02:03.0 < (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 <= (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 <> (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 = (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 > (dt - 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 >= (dt - 0 01:02:03.000000000)) and (ts < (dt + 0 01:02:04.000000000)) and (ts <= (dt + 0 01:02:03.000000000)) and (ts <> (dt + 0 01:02:04.000000000)) and (ts = (dt + 0 01:02:03.000000000)) and (ts > (dt - 0 01:02:04.000000000)) and (ts >= (dt - 0 01:02:03.000000000))) (type: boolean)
+ predicate: (((dt + INTERVAL'0 01:02:03.000000000') = TIMESTAMP'2001-01-01 01:02:03.0') and ((dt + INTERVAL'0 01:02:03.000000000') >= TIMESTAMP'2001-01-01 01:02:03.0') and ((dt + INTERVAL'0 01:02:04.000000000') <> TIMESTAMP'2001-01-01 01:02:03.0') and ((dt + INTERVAL'0 01:02:04.000000000') > TIMESTAMP'2001-01-01 01:02:03.0') and ((dt - INTERVAL'0 01:02:03.000000000') <= TIMESTAMP'2001-01-01 01:02:03.0') and ((dt - INTERVAL'0 01:02:04.000000000') < TIMESTAMP'2001-01-01 01:02:03.0') and (TIMESTAMP'2001-01-01 01:02:03.0' < (dt + INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' <= (dt + INTERVAL'0 01:02:03.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' <> (dt + INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' = (dt + INTERVAL'0 01:02:03.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' > (dt - INTERVAL'0 01:02:04.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' >= (dt - INTERVAL'0 01:02:03.000000000')) and (ts < (dt + INTERVAL'0 01:02:04.000000000')) and (ts <= (dt + INTERVAL'0 01:02:03.000000000')) and (ts <> (dt + INTERVAL'0 01:02:04.000000000')) and (ts = (dt + INTERVAL'0 01:02:03.000000000')) and (ts > (dt - INTERVAL'0 01:02:04.000000000')) and (ts >= (dt - INTERVAL'0 01:02:03.000000000'))) (type: boolean)
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ts (type: timestamp)
@@ -1894,7 +1894,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterTimestampScalarEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 0 00:00:00.000000000) -> 7:timestamp), FilterTimestampScalarNotEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampScalarLessEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampScalarLessTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampScalarGreaterEqualTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampScalarGreaterTimestampColumn(val 2001-01-01 01:02:03.0, col 7:timestamp)(children: TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 0 00:00:00.000000000) -> 7:timestamp), FilterTimestampColNotEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColGreaterEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColGreaterTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColLessEqualTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColLessTimestampScalar(col 7:timestamp, val 2001-01-01 01:02:03.0)(children: TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 0 00:00:00.000000000) -> 7:timestamp), FilterTimestampColNotEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColLessEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColLessTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColGreaterEqualTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp), FilterTimestampColGreaterTimestampColumn(col 0:timestamp, col 7:timestamp)(children: TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 00:00:00.000000000) -> 7:timestamp))
- predicate: (((ts + 0 00:00:00.000000000) = 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) <> 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) > 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) >= 2001-01-01 01:02:03.0) and ((ts - 1 00:00:00.000000000) < 2001-01-01 01:02:03.0) and ((ts - 1 00:00:00.000000000) <= 2001-01-01 01:02:03.0) and (2001-01-01 01:02:03.0 < (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <= (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <> (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 = (ts + 0 00:00:00.000000000)) and (2001-01-01 01:02:03.0 > (ts - 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 >= (ts - 1 00:00:00.000000000)) and (ts < (ts + 1 00:00:00.000000000)) and (ts <= (ts + 1 00:00:00.000000000)) and (ts <> (ts + 1 00:00:00.000000000)) and (ts = (ts + 0 00:00:00.000000000)) and (ts > (ts - 1 00:00:00.000000000)) and (ts >= (ts - 1 00:00:00.000000000))) (type: boolean)
+ predicate: (((ts + INTERVAL'0 00:00:00.000000000') = TIMESTAMP'2001-01-01 01:02:03.0') and ((ts + INTERVAL'1 00:00:00.000000000') <> TIMESTAMP'2001-01-01 01:02:03.0') and ((ts + INTERVAL'1 00:00:00.000000000') > TIMESTAMP'2001-01-01 01:02:03.0') and ((ts + INTERVAL'1 00:00:00.000000000') >= TIMESTAMP'2001-01-01 01:02:03.0') and ((ts - INTERVAL'1 00:00:00.000000000') < TIMESTAMP'2001-01-01 01:02:03.0') and ((ts - INTERVAL'1 00:00:00.000000000') <= TIMESTAMP'2001-01-01 01:02:03.0') and (TIMESTAMP'2001-01-01 01:02:03.0' < (ts + INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' <= (ts + INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' <> (ts + INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' = (ts + INTERVAL'0 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' > (ts - INTERVAL'1 00:00:00.000000000')) and (TIMESTAMP'2001-01-01 01:02:03.0' >= (ts - INTERVAL'1 00:00:00.000000000')) and (ts < (ts + INTERVAL'1 00:00:00.000000000')) and (ts <= (ts + INTERVAL'1 00:00:00.000000000')) and (ts <> (ts + INTERVAL'1 00:00:00.000000000')) and (ts = (ts + INTERVAL'0 00:00:00.000000000')) and (ts > (ts - INTERVAL'1 00:00:00.000000000')) and (ts >= (ts - INTERVAL'1 00:00:00.000000000'))) (type: boolean)
 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ts (type: timestamp)
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
index f2a4d3a855..87993d2b49 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dateval (type: date), (dateval - 2-2) (type: date), (dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + -2-2) (type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date)
+ expressions: dateval (type: date), (dateval - INTERVAL'2-2') (type: date), (dateval - INTERVAL'-2-2') (type: date), (dateval + INTERVAL'2-2') (type: date), (dateval + INTERVAL'-2-2') (type: date), (INTERVAL'-2-2' + dateval) (type: date), (INTERVAL'2-2' + dateval) (type: date)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -266,7 +266,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dateval (type: date), (dateval - 1999-06-07) (type: interval_day_time), (1999-06-07 - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time)
+ expressions: dateval (type: date), (dateval - DATE'1999-06-07') (type: interval_day_time), (DATE'1999-06-07' - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
@@ -448,7 +448,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: tsval (type: timestamp), (tsval - 2-2) (type: timestamp), (tsval - -2-2) (type: timestamp), (tsval + 2-2) (type: timestamp), (tsval + -2-2) (type: timestamp), (-2-2 + tsval) (type: timestamp), (2-2 + tsval) (type: timestamp)
+ expressions: tsval (type: timestamp), (tsval - INTERVAL'2-2') (type: timestamp), (tsval - INTERVAL'-2-2') (type: timestamp), (tsval + INTERVAL'2-2') (type: timestamp), (tsval + INTERVAL'-2-2') (type: timestamp), (INTERVAL'-2-2' + tsval) (type: timestamp), (INTERVAL'2-2' + tsval) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -625,7 +625,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
+ expressions: INTERVAL'5-5' (type: interval_year_month), INTERVAL'-1-1' (type: interval_year_month)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
@@ -737,7 +737,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dateval (type: date), (dateval - 99 11:22:33.123456789) (type: timestamp), (dateval - -99 11:22:33.123456789) (type: timestamp), (dateval + 99 11:22:33.123456789) (type: timestamp), (dateval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + dateval) (type: timestamp), (99 11:22:33.123456789 + dateval) (type: timestamp)
+ expressions: dateval (type: date), (dateval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + dateval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + dateval) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -1105,7 +1105,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: tsval (type: timestamp), (tsval - 99 11:22:33.123456789) (type: timestamp), (tsval - -99 11:22:33.123456789) (type: timestamp), (tsval + 99 11:22:33.123456789) (type: timestamp), (tsval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + tsval) (type: timestamp), (99 11:22:33.123456789 + tsval) (type: timestamp)
+ expressions: tsval (type: timestamp), (tsval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + tsval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + tsval) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -1280,7 +1280,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: 109 20:30:40.246913578 (type: interval_day_time), 89 02:14:26.000000000 (type: interval_day_time)
+ expressions: INTERVAL'109 20:30:40.246913578' (type: interval_day_time), INTERVAL'89 02:14:26.000000000' (type: interval_day_time)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
index cd2e1e4c42..7a2cd54900 100644
--- a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
@@ -131,13 +131,13 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: hash(t,si,i,(t < 0),(si <= 0),(i = 0)) (type: int)
+ expressions: hash(t,si,i,(t < 0Y),(si <= 0S),(i = 0)) (type: int)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [11]
- selectExpressions: VectorUDFAdaptor(hash(t,si,i,(t < 0),(si <= 0),(i = 0)))(children: LongColLessLongScalar(col 0:tinyint, val 0) -> 8:boolean, LongColLessEqualLongScalar(col 1:smallint, val 0) -> 9:boolean, LongColEqualLongScalar(col 2:int, val 0) -> 10:boolean) -> 11:int
+ selectExpressions: VectorUDFAdaptor(hash(t,si,i,(t < 0Y),(si <= 0S),(i = 0)))(children: LongColLessLongScalar(col 0:tinyint, val 0) -> 8:boolean, LongColLessEqualLongScalar(col 1:smallint, val 0) -> 9:boolean, LongColEqualLongScalar(col 2:int, val 0) -> 10:boolean) -> 11:int
 Statistics: Num rows: 2001 Data size: 22824 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: sum(_col0)
@@ -257,13 +257,13 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: hash(t,si,i,b,(t > 0),(si >= 0),(i <> 0),(b > 0)) (type: int)
+ expressions: hash(t,si,i,b,(t > 0Y),(si >= 0S),(i <> 0),(b > 0L)) (type: int)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [12]
- selectExpressions: VectorUDFAdaptor(hash(t,si,i,b,(t > 0),(si >= 0),(i <> 0),(b > 0)))(children: LongColGreaterLongScalar(col 0:tinyint, val 0) -> 8:boolean, LongColGreaterEqualLongScalar(col 1:smallint, val 0) -> 9:boolean, LongColNotEqualLongScalar(col 2:int, val 0) -> 10:boolean, LongColGreaterLongScalar(col 3:bigint, val 0) -> 11:boolean) -> 12:int
+ selectExpressions: VectorUDFAdaptor(hash(t,si,i,b,(t > 0Y),(si >= 0S),(i <> 0),(b > 0L)))(children: LongColGreaterLongScalar(col 0:tinyint, val 0) -> 8:boolean, LongColGreaterEqualLongScalar(col 1:smallint, val 0) -> 9:boolean, LongColNotEqualLongScalar(col 2:int, val 0) -> 10:boolean, LongColGreaterLongScalar(col 3:bigint, val 0) -> 11:boolean) -> 12:int
 Statistics: Num rows: 2001 Data size: 38040 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: sum(_col0)
diff --git a/ql/src/test/results/clientpositive/llap/vector_nvl.q.out b/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
index 837a574b1b..2bbbbe4250 100644
--- a/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
@@ -36,7 +36,7 @@ STAGE PLANS:
 predicate: cdouble is null (type: boolean)
 Statistics: Num rows: 3114 Data size: 18608 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
- expressions: null (type: double), 100.0 (type: double)
+ expressions: null (type: double), 100.0D (type: double)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
index 16b59e612c..43707e0b5f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
@@ -5640,9 +5640,9 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:p_mfgr:string, 1:p_name:string, 2:p_retailprice:double, 3:ROW__ID:struct]
 Reduce Output Operator
- key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
+ key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
 sort order: ++
- Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
+ Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
 Reduce Sink Vectorization:
 className: VectorReduceSinkMultiKeyOperator
 keyColumnNums: [0, 6]
@@ -5690,13 +5690,13 @@ STAGE PLANS:
 Windowing table definition
 input alias: ptf_1
 name: windowingtablefunction
- order by: _col0 ASC NULLS FIRST, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END ASC NULLS FIRST
- partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END
+ order by: _col0 ASC NULLS FIRST, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END ASC NULLS FIRST
+ partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END
 raw input shape:
 window functions:
 window function definition
 alias: rank_window_0
- arguments: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END
+ arguments: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END
 name: rank
 window function: GenericUDAFRankEvaluator
 window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
@@ -5809,9 +5809,9 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:p_mfgr:string, 1:p_name:string, 2:p_retailprice:double, 3:ROW__ID:struct]
 Reduce Output Operator
- key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp), p_name (type: string)
+ key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp), p_name (type: string)
 sort order: +++
- Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
+ Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
 Reduce Sink Vectorization:
 className: VectorReduceSinkObjectHashOperator
 keyColumnNums: [0, 6, 1]
@@ -5873,7 +5873,7 @@ STAGE PLANS:
 input alias: ptf_1
 name: windowingtablefunction
 order by: _col1 ASC NULLS FIRST
- partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END
+ partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END
 raw input shape:
 window functions:
 window function definition
@@ -6541,9 +6541,9 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:p_mfgr:string, 1:p_name:string, 2:p_retailprice:double, 3:ROW__ID:struct]
 Reduce Output Operator
- key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp), p_name (type: string)
+ key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp), p_name (type: string)
 sort order: +++
- Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
+ Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
 Reduce Sink Vectorization:
 className: VectorReduceSinkObjectHashOperator
 keyColumnNums: [0, 6, 1]
@@ -6605,7 +6605,7 @@ STAGE PLANS:
 input alias: ptf_1
 name: windowingtablefunction
 order by: _col1 ASC NULLS FIRST
- partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END
+ partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END
 raw input shape:
 window functions:
 window function definition
@@ -6743,9 +6743,9 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:p_mfgr:string, 1:p_name:string, 2:p_retailprice:double, 3:ROW__ID:struct]
 Reduce Output Operator
- key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
+ key expressions: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
 sort order: ++
- Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
+ Map-reduce partition columns: p_mfgr (type: string), CASE WHEN ((p_mfgr = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END (type: timestamp)
 Reduce Sink Vectorization:
 className: VectorReduceSinkMultiKeyOperator
 keyColumnNums: [0, 6]
@@ -6793,13 +6793,13 @@ STAGE PLANS:
 Windowing table definition
 input alias: ptf_1
 name: windowingtablefunction
- order by: _col0 ASC NULLS FIRST, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END ASC NULLS FIRST
- partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END
+ order by: _col0 ASC NULLS FIRST, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END ASC NULLS FIRST
+ partition by: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END
 raw input shape:
 window functions:
 window function definition
 alias: rank_window_0
- arguments: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (2000-01-01 00:00:00.0) ELSE (CAST( null AS TIMESTAMP)) END
+ arguments: _col0, CASE WHEN ((_col0 = 'Manufacturer#2')) THEN (TIMESTAMP'2000-01-01 00:00:00.0') ELSE (CAST( null AS TIMESTAMP)) END
 name: rank
 window function: GenericUDAFRankEvaluator
 window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
diff --git a/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out b/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
index b9c1ba31e8..fdd368d2eb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
@@ -101,7 +101,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterLongScalarEqualLongColumn(val 762, col 3:bigint), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 1:smallint) -> 13:float), FilterDoubleColGreaterDoubleScalar(col 13:double, val -5.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 2:int) -> 13:double)), FilterStringGroupColEqualStringScalar(col 6:string, val a), FilterExprAndExpr(children: FilterDecimalColLessEqualDecimalScalar(col 14:decimal(22,3), val -1.389)(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterStringGroupColNotEqualStringScalar(col 7:string, val a), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 15:decimal(13,3))(children: CastLongToDecimal(col 2:int) -> 15:decimal(13,3)), FilterLongColNotEqualLongColumn(col 11:boolean, col 10:boolean)))
- predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (762 = cbigint) or (cstring1 = 'a')) (type: boolean)
+ predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0D) and (cdouble <> UDFToDouble(cint))) or (762L = cbigint) or (cstring1 = 'a')) (type: boolean)
 Statistics: Num rows: 5465 Data size: 1157230 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float), cdouble (type: double)
@@ -164,7 +164,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Statistics: Num rows: 1 Data size: 68 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
- expressions: _col0 (type: double), (_col0 + -3728.0) (type: double), (- (_col0 + -3728.0)) (type: double), (- (- (_col0 + -3728.0))) (type: double), ((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) * (- (- (_col0 + -3728.0)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175 - _col4) (type: double), (- (10.175 - _col4)) (type: double), ((- _col2) / -563.0) (type: double), _col6 (type: double), (- ((- _col2) / -563.0)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0)) (type: double), (- (_col0 / _col1)) (type: double)
+ expressions: _col0 (type: double), (_col0 + -3728.0D) (type: double), (- (_col0 + -3728.0D)) (type: double), (- (- (_col0 + -3728.0D))) (type: double), ((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) * (- (- (_col0 + -3728.0D)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0D)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0D)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175D - _col4) (type: double), (- (10.175D - _col4)) (type: double), ((- _col2) / -563.0D) (type: double), _col6 (type: double), (- ((- _col2) / -563.0D)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0D)) (type: double), (- (_col0 / _col1)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE
 File Output Operator
@@ -284,7 +284,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterLongScalarEqualLongColumn(val 762, col 3:bigint), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 1:smallint) -> 13:float), FilterDoubleColGreaterDoubleScalar(col 14:double, val -5.0)(children: CastTimestampToDouble(col 9:timestamp) -> 14:double), FilterDoubleColNotEqualDoubleColumn(col 5:double, col 15:double)(children: CastLongToDouble(col 2:int) -> 15:double)), FilterStringGroupColEqualStringScalar(col 6:string, val a), FilterExprAndExpr(children: FilterDecimalColLessEqualDecimalScalar(col 16:decimal(22,3), val -1.389)(children: CastLongToDecimal(col 3:bigint) -> 16:decimal(22,3)), FilterStringGroupColNotEqualStringScalar(col 7:string, val a), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 17:decimal(13,3))(children: CastLongToDecimal(col 2:int) -> 17:decimal(13,3)), FilterLongColNotEqualLongColumn(col 11:boolean, col 10:boolean)))
- predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (762 = cbigint) or (cstring1 = 'a')) (type: boolean)
+ predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0D) and (cdouble <> UDFToDouble(cint))) or (762L = cbigint) or (cstring1 = 'a')) (type: boolean)
 Statistics: Num rows: 5465 Data size: 1157230 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float), cdouble (type: double)
@@ -347,7 +347,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Statistics: Num rows: 1 Data size: 68 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
- expressions: _col0 (type: double), (_col0 + -3728.0) (type: double), (- (_col0 + -3728.0)) (type: double), (- (- (_col0 + -3728.0))) (type: double), ((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) * (- (- (_col0 + -3728.0)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175 - _col4) (type: double), (- (10.175 - _col4)) (type: double), ((- _col2) / -563.0) (type: double), _col6 (type: double), (- ((- _col2) / -563.0)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0)) (type: double), (- (_col0 / _col1)) (type: double)
+ expressions: _col0 (type: double), (_col0 + -3728.0D) (type: double), (- (_col0 + -3728.0D)) (type: double), (- (- (_col0 + -3728.0D))) (type: double), ((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) * (- (- (_col0 + -3728.0D)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0D)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0D)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175D - _col4) (type: double), (- (10.175D - _col4)) (type: double), ((- _col2) / -563.0D) (type: double), _col6 (type: double), (- ((- _col2) / -563.0D)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0D)) (type: double), (- (_col0 / _col1)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
index 2ceef58012..5b43765e51 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
@@ -344,7 +344,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0) + 1.0)))), '-'), UDFToString(year(dt))) (type: string)
+ expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0D) + 1.0D)))), '-'), UDFToString(year(dt))) (type: string)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
index d792c46c25..184d7bcf64 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
@@ -71,7 +71,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterDoubleColumnInList(col 2:double, values [1.0E8, 2.0E8])(children: CastDecimalToDouble(col 0:decimal(18,0)) -> 2:double)
- predicate: (UDFToDouble(id)) IN (1.0E8, 2.0E8) (type: boolean)
+ predicate: (UDFToDouble(id)) IN (1.0E8D, 2.0E8D) (type: boolean)
 Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: id (type: decimal(18,0))
diff --git a/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out b/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
index 4af1015fe7..4d2130b70b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
@@ -846,7 +846,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterStructColumnInList(structExpressions [col 0:bigint, col 1:string, col 2:double], fieldVectorColumnTypes [LONG, BYTES, DOUBLE], structColumnMap [0, 1, 2])
- predicate: (struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5)) (type: boolean)
+ predicate: (struct(my_bigint,my_string,my_double)) IN (const struct(1L,'a',1.5D), const struct(1L,'b',-0.5D), const struct(3L,'b',1.5D), const struct(1L,'d',1.5D), const struct(1L,'c',1.5D), const struct(1L,'b',2.5D), const struct(1L,'b',0.5D), const struct(5L,'b',1.5D), const struct(1L,'a',0.5D), const struct(3L,'b',1.5D)) (type: boolean)
 Statistics: Num rows: 3 Data size: 303 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
 expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double)
@@ -968,7 +968,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double), (struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5)) (type: boolean)
+ expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double), (struct(my_bigint,my_string,my_double)) IN (const struct(1L,'a',1.5D), const struct(1L,'b',-0.5D), const struct(3L,'b',1.5D), const struct(1L,'d',1.5D), const struct(1L,'c',1.5D), const struct(1L,'b',2.5D), const struct(1L,'b',0.5D), const struct(5L,'b',1.5D), const struct(1L,'a',0.5D), const struct(3L,'b',1.5D)) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out b/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
index a752dfadd4..7402667b55 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
@@ -80,13 +80,13 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:name:string, 1:age:int, 2:gpa:double, 3:ROW__ID:struct]
 Select Operator
- expressions: UDFToFloat(gpa) (type: float), age (type: int), if((age > 40), 2011-01-01 01:01:01.0, null) (type: timestamp), if((length(name) > 10), CAST( name AS BINARY), null) (type: binary)
+ expressions: UDFToFloat(gpa) (type: float), age (type: int), if((age > 40), TIMESTAMP'2011-01-01 01:01:01.0', null) (type: timestamp), if((length(name) > 10), CAST( name AS BINARY), null) (type: binary)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [2, 1, 5, 8]
- selectExpressions: VectorUDFAdaptor(if((age > 40), 2011-01-01 01:01:01.0, null))(children: LongColGreaterLongScalar(col 1:int, val 40) -> 4:boolean) -> 5:timestamp, VectorUDFAdaptor(if((length(name) > 10), CAST( name AS BINARY), null))(children: LongColGreaterLongScalar(col 4:int, val 10)(children: StringLength(col 0:string) -> 4:int) -> 6:boolean, VectorUDFAdaptor(CAST( name AS BINARY)) -> 7:binary) -> 8:binary
+ selectExpressions: VectorUDFAdaptor(if((age > 40), TIMESTAMP'2011-01-01 01:01:01.0', null))(children: LongColGreaterLongScalar(col 1:int, val 40) -> 4:boolean) -> 5:timestamp, VectorUDFAdaptor(if((length(name) > 10), CAST( name AS BINARY), null))(children: LongColGreaterLongScalar(col 4:int, val 10)(children: StringLength(col 0:string) -> 4:int) -> 6:boolean, VectorUDFAdaptor(CAST( name AS BINARY)) -> 7:binary) -> 8:binary
 Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
 compressed: false
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
index 7e9a564487..5405bd8a29 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
@@ -348,7 +348,7 @@ STAGE PLANS:
 window frame: ROWS PRECEDING(MAX)~CURRENT
 Statistics: Num rows: 26 Data size: 9828 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
- expressions: _col2 (type: string), _col7 (type: double), _col5 (type: int), rank_window_0 (type: int), sum_window_1 (type: double), (sum_window_1 - 5.0) (type: double)
+ expressions: _col2 (type: string), _col7 (type: double), _col5 (type: int), rank_window_0 (type: int), sum_window_1 (type: double), (sum_window_1 - 5.0D) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Statistics: Num rows: 26 Data size: 3380 Basic stats: COMPLETE Column stats: COMPLETE
 File Output Operator
@@ -2002,7 +2002,7 @@ STAGE PLANS:
 Lead/Lag information: lag(...) (type: double)
 Statistics: Num rows: 26 Data size: 9828 Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
- expressions: _col2 (type: string), _col7 (type: double), _col5 (type: int), ((round(sum_window_0, 2) + 50.0) = round((sum_window_1 + last_value_window_2), 2)) (type: boolean)
+ expressions: _col2 (type: string), _col7 (type: double), _col5 (type: int), ((round(sum_window_0, 2) + 50.0D) = round((sum_window_1 + last_value_window_2), 2)) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 26 Data size: 2964 Basic stats: COMPLETE Column stats: COMPLETE
 Limit
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
index 8dc3fa7764..caf99639af 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
@@ -1424,7 +1424,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterExprOrExpr(children: FilterStringGroupColEqualStringScalar(col 7:string, val oscar allen), FilterStringGroupColEqualStringScalar(col 7:string, val oscar carson)), FilterLongColEqualLongScalar(col 0:tinyint, val 10))
- predicate: (((s = 'oscar allen') or (s = 'oscar carson')) and (t = 10)) (type: boolean)
+ predicate: (((s = 'oscar allen') or (s = 'oscar carson')) and (t = 10Y)) (type: boolean)
 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
 Reduce Output Operator
 key expressions: UDFToByte(10) (type: tinyint), s (type: string)
@@ -1516,7 +1516,7 @@ STAGE PLANS:
 streamingColumns: []
 Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: 10 (type: tinyint), _col7 (type: string), _col2 (type: int), last_value_window_0 (type: int)
+ expressions: 10Y (type: tinyint), _col7 (type: string), _col2 (type: int), last_value_window_0 (type: int)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
index 8aa904f6bc..c5dd540b74 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
@@ -1040,7 +1040,7 @@ STAGE PLANS:
 streamingColumns: []
 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col7 (type: string), _col2 (type: int), round((avg_window_0 / 10.0), 3) (type: double)
+ expressions: _col7 (type: string), _col2 (type: int), round((avg_window_0 / 10.0D), 3) (type: double)
 outputColumnNames: _col0, _col1, _col2
 Select Vectorization:
 className: VectorSelectOperator
@@ -1205,7 +1205,7 @@ STAGE PLANS:
 streamingColumns: []
 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col7 (type: string), _col2 (type: int), round(((avg_window_0 + 10.0) - (avg_window_0 - 10.0)), 3) (type: double)
+ expressions: _col7 (type: string), _col2 (type: int), round(((avg_window_0 + 10.0D) - (avg_window_0 - 10.0D)), 3) (type: double)
 outputColumnNames: _col0, _col1, _col2
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out
b/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out index aa47b1bf75..cb8baa0f2d 100644 --- a/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out @@ -1638,7 +1638,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColLessLongScalar(col 0:tinyint, val 10), SelectColumnIsNotNull(col 3:bigint)) - predicate: ((t < 10) and b is not null) (type: boolean) + predicate: ((t < 10Y) and b is not null) (type: boolean) Statistics: Num rows: 1 Data size: 164 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: b (type: bigint), ts (type: timestamp), dec (type: decimal(4,2)) diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out index 2c54027bed..ebfcf76b0d 100644 --- a/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out @@ -431,7 +431,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColLessLongScalar(col 0:tinyint, val 5) - predicate: (t < 5) (type: boolean) + predicate: (t < 5Y) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: t (type: tinyint), f (type: float) diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out index 2d48bd50a4..f47089eeee 100644 --- a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out +++ b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out @@ -2020,7 +2020,7 @@ STAGE PLANS: streamingColumns: [] Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col7 (type: string), _col2 (type: int), round((avg_window_0 / 10.0), 2) (type: double) + expressions: _col7 (type: string), _col2 (type: int), round((avg_window_0 / 10.0D), 2) (type: double) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -2187,7 +2187,7 @@ STAGE PLANS: streamingColumns: [] Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col7 (type: string), _col2 (type: int), round(((avg_window_0 + 10.0) - (avg_window_0 - 10.0)), 2) (type: double) + expressions: _col7 (type: string), _col2 (type: int), round(((avg_window_0 + 10.0D) - (avg_window_0 - 10.0D)), 2) (type: double) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out index 49c3036270..2e6515ab35 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out @@ -1666,7 +1666,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0 + _col0) (type: double), _col1 (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) + (-6432.0 + _col0)) (type: double), _col2 (type: double), 
(- (-6432.0 + _col0)) (type: double), (-6432.0 + (- (-6432.0 + _col0))) (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) / (- (-6432.0 + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0 + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) + expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0D + _col0) (type: double), _col1 (type: double), (- (-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) + (-6432.0D + _col0)) (type: double), _col2 (type: double), (- (-6432.0D + _col0)) (type: double), (-6432.0D + (- (-6432.0D + _col0))) (type: double), (- (-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) / (- (-6432.0D + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0D + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_1.q.out b/ql/src/test/results/clientpositive/llap/vectorization_1.q.out index d2de8e75b9..8fd8a95d37 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_1.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_1.q.out @@ -132,7 +132,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (_col0 / -26.28) (type: double), _col1 (type: double), (-1.389 + _col1) (type: double), (_col1 * (-1.389 + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389 + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175 % (- (_col1 * (-1.389 + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) + expressions: _col0 (type: double), (_col0 / -26.28D) (type: double), _col1 (type: double), (-1.389D + _col1) (type: double), (_col1 * (-1.389D + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389D + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175D % (- (_col1 * (-1.389D + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_10.q.out b/ql/src/test/results/clientpositive/llap/vectorization_10.q.out index b6c68fbedd..2913167423 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_10.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_10.q.out @@ -72,10 +72,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringGroupColLessEqualStringScalar(col 7:string, val 10), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 
0:tinyint) -> 13:double), FilterDecimalScalarGreaterEqualDecimalColumn(val -5638.15, col 14:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(6,2))), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 6981.0), FilterExprOrExpr(children: FilterDecimalColEqualDecimalScalar(col 15:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(11,4)), FilterStringColLikeStringScalar(col 6:string, pattern %a)))) - predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) + predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0D) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) Statistics: Num rows: 9557 Data size: 1937820 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639 - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) + expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0D) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639D - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_11.q.out b/ql/src/test/results/clientpositive/llap/vectorization_11.q.out index bb0feecc73..69c22fbc28 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_11.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_11.q.out @@ -57,7 +57,7 @@ STAGE PLANS: predicate: ((cstring2 = cstring1) or (ctimestamp1 is null and (cstring1 like '%a'))) (type: boolean) Statistics: Num rows: 6144 Data size: 1190792 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0) (type: double), (cdouble * -5638.15) (type: double) + expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 
9763215.5639D) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0D) (type: double), (cdouble * -5638.15D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_12.q.out b/ql/src/test/results/clientpositive/llap/vectorization_12.q.out index 77f8e3be97..a0365ecef2 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_12.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_12.q.out @@ -160,7 +160,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 154 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0 * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0 * _col0) / -6432.0) (type: double), (- ((-6432.0 * _col0) / -6432.0)) (type: double), _col6 (type: double), (- (-6432.0 * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0 * _col0)) (type: double), (- (- ((-6432.0 * _col0) / -6432.0))) (type: double), (((-6432.0 * _col0) / -6432.0) + (- (-6432.0 * _col0))) (type: double), _col8 (type: double) + expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0D * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0D * _col0) / -6432.0D) (type: double), (- ((-6432.0D * _col0) / -6432.0D)) (type: double), _col6 (type: double), (- (-6432.0D * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0D * _col0)) (type: double), (- (- ((-6432.0D * _col0) / -6432.0D))) (type: double), (((-6432.0D * _col0) / -6432.0D) + (- (-6432.0D * _col0))) (type: double), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19 Statistics: Num rows: 1 Data size: 338 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_13.q.out b/ql/src/test/results/clientpositive/llap/vectorization_13.q.out index 23914f8ed1..7d6d930214 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_13.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_13.q.out @@ -94,7 +94,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 11.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 12.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > 11.0) and 
(UDFToDouble(ctimestamp2) <> 12.0) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > 11.0D) and (UDFToDouble(ctimestamp2) <> 12.0D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 5461 Data size: 901772 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -162,7 +162,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 211860 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 446640 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator @@ -420,7 +420,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -1.388)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val -1.3359999999999999)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and 
(cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > -1.388D) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 5461 Data size: 901772 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -480,7 +480,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 211860 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 446640 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_14.q.out b/ql/src/test/results/clientpositive/llap/vectorization_14.q.out index c7a32e3a7e..0a631cabef 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_14.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_14.q.out @@ -94,10 +94,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColLessEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterTimestampColLessTimestampColumn(col 9:timestamp, col 8:timestamp)), FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -257), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float))) - predicate: 
(((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) + predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257L) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) Statistics: Num rows: 606 Data size: 105558 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double) + expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Select Vectorization: className: VectorSelectOperator @@ -163,7 +163,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 151 Data size: 25224 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28 + _col2) (type: double), (- (-26.28 + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28 + _col2)) / 10.175) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28 + _col2)) / 10.175)) (type: double), (-1.389 % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) + expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175D) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Statistics: Num rows: 151 Data size: 36700 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_15.q.out b/ql/src/test/results/clientpositive/llap/vectorization_15.q.out index 9ab5965e48..b39817826d 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_15.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_15.q.out @@ -90,7 +90,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %ss%), FilterStringColLikeStringScalar(col 6:string, pattern 10%), FilterExprAndExpr(children: FilterLongColGreaterEqualLongScalar(col 2:int, val -75), 
FilterLongColEqualLongColumn(col 0:smallint, col 1:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -3728.0))) - predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) + predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0D)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) Statistics: Num rows: 12288 Data size: 2491562 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -157,7 +157,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 3072 Data size: 541028 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) + expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 3072 Data size: 1327460 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_16.q.out b/ql/src/test/results/clientpositive/llap/vectorization_16.q.out index e7d873f58a..336e36e363 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_16.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_16.q.out @@ -67,7 +67,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 769522 Basic stats: COMPLETE Column stats: COMPLETE Select 
Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -135,7 +135,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1024 Data size: 143566 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 1024 Data size: 307406 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_17.q.out b/ql/src/test/results/clientpositive/llap/vectorization_17.q.out index bdcb1ebfee..63c9261f69 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_17.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_17.q.out @@ -75,10 +75,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -23), FilterExprOrExpr(children: FilterDoubleColNotEqualDoubleScalar(col 5:double, val 988888.0), FilterDecimalColGreaterDecimalScalar(col 13:decimal(13,3), val -863.257)(children: CastLongToDecimal(col 2:int) -> 13:decimal(13,3))), FilterExprOrExpr(children: FilterLongColGreaterEqualLongScalar(col 0:tinyint, val 33), FilterLongColGreaterEqualLongColumn(col 1:bigint, col 3:bigint)(children: col 1:smallint), FilterDoubleColEqualDoubleColumn(col 4:double, col 5:double)(children: col 4:float))) - predicate: (((cdouble <> 988888.0) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23)) (type: boolean) + predicate: (((cdouble <> 988888.0D) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33Y) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23L)) (type: boolean) Statistics: Num rows: 4096 Data size: 549274 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58 + (- (- cdouble))) (type: double) + expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), 
ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58D + (- (- cdouble))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_2.q.out b/ql/src/test/results/clientpositive/llap/vectorization_2.q.out index 2d4b703cd3..2cb824e5a7 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_2.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_2.q.out @@ -73,7 +73,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessTimestampColumn(col 8:timestamp, col 9:timestamp), FilterStringColLikeStringScalar(col 7:string, pattern b%), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -5638.14990234375)), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterDoubleScalarNotEqualDoubleColumn(val -10669.0, col 13:double)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterLongScalarGreaterLongColumn(val 359, col 2:int)))) - predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0 <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) + predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0D <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) Statistics: Num rows: 4096 Data size: 719232 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cbigint (type: bigint), cfloat (type: float), cdouble (type: double) @@ -136,7 +136,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (_col0 % -563.0) (type: double), (_col0 + 762.0) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) + expressions: _col0 (type: double), (_col0 % -563.0D) (type: double), (_col0 + 762.0D) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0D) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, 
_col13 Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_3.q.out b/ql/src/test/results/clientpositive/llap/vectorization_3.q.out index 9de4fcd82d..13da83546d 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_3.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_3.q.out @@ -78,7 +78,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 14:decimal(22,3))(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterDoubleColEqualDoubleScalar(col 13:double, val -29071.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterDecimalScalarLessEqualDecimalColumn(val 79.553, col 15:decimal(8,3))(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(8,3)), FilterTimestampColGreaterTimestampColumn(col 8:timestamp, col 9:timestamp))) - predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0))) (type: boolean) + predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0D))) (type: boolean) Statistics: Num rows: 2503 Data size: 260060 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float) @@ -141,7 +141,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (_col0 - 10.175) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175)) (type: double), (- _col1) (type: double), (_col0 % 79.553) (type: double), (- (_col0 * (_col0 - 10.175))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175))) / (_col0 - 10.175)) (type: double), (- (_col0 - 10.175)) (type: double), _col4 (type: double), (-3728.0 - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) + expressions: _col0 (type: double), (_col0 - 10.175D) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175D)) (type: double), (- _col1) (type: double), (_col0 % 79.553D) (type: double), (- (_col0 * (_col0 - 10.175D))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175D))) / (_col0 - 10.175D)) (type: double), (- (_col0 - 10.175D)) (type: double), _col4 (type: double), (-3728.0D - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: 
COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_4.q.out b/ql/src/test/results/clientpositive/llap/vectorization_4.q.out index a2142b716d..227b2a62bb 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_4.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_4.q.out @@ -73,7 +73,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterEqualLongColumn(col 1:int, col 2:int)(children: col 1:smallint), FilterExprAndExpr(children: FilterLongScalarGreaterEqualLongColumn(val -89010, col 0:int)(children: col 0:tinyint), FilterDoubleColGreaterDoubleScalar(col 5:double, val 79.553)), FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -563, col 3:bigint), FilterExprOrExpr(children: FilterLongColNotEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterDoubleScalarGreaterEqualDoubleColumn(val -3728.0, col 5:double)))) - predicate: (((-563 <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0 >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) + predicate: (((-563L <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0D >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553D)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) Statistics: Num rows: 12288 Data size: 256884 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cdouble (type: double) @@ -136,7 +136,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: bigint), (_col0 * -563) (type: bigint), (-3728 + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2)) (type: double), ((-3728 + _col0) - (_col0 * -563)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2))) (type: double) + expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2)) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_5.q.out b/ql/src/test/results/clientpositive/llap/vectorization_5.q.out index d41de01162..48a4ce33a9 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_5.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_5.q.out @@ -145,7 +145,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: 
Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0 % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0D % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_6.q.out b/ql/src/test/results/clientpositive/llap/vectorization_6.q.out index 84b9260c18..fa587aa1af 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_6.q.out @@ -66,10 +66,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 10:boolean, val 0), FilterLongColGreaterEqualLongColumn(col 11:boolean, col 10:boolean)), FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:bigint), FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %a), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -257.0))))) - predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0)) (type: boolean) + predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5951 Data size: 1022000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28 / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) + expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28D / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: 
className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out index 3c7522912d..68b4065a4c 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out @@ -81,7 +81,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -15.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 1342196 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) @@ -330,7 +330,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 7.6850000000000005)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) 
Statistics: Num rows: 5461 Data size: 1342196 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) diff --git a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out index 22a1b34d0b..a9dfffb5fc 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out @@ -77,10 +77,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 10.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 16.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0D) and (UDFToDouble(ctimestamp2) <> 16.0D))) (type: boolean) Statistics: Num rows: 3059 Data size: 742850 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator @@ -313,10 +313,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 
12.503)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 11.998)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503D) and (UDFToDouble(ctimestamp2) <> 11.998D))) (type: boolean) Statistics: Num rows: 3059 Data size: 742850 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_9.q.out b/ql/src/test/results/clientpositive/llap/vectorization_9.q.out index e7d873f58a..336e36e363 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_9.q.out @@ -67,7 +67,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 769522 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -135,7 +135,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1024 Data size: 143566 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: 
double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 1024 Data size: 307406 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out index e46c7f4524..80c7c0cda3 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out @@ -72,7 +72,7 @@ STAGE PLANS: alias: alltypesorc_part Statistics: Num rows: 200 Data size: 1592 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: (cdouble + 2.0) (type: double) + expressions: (cdouble + 2.0D) (type: double) outputColumnNames: _col0 Statistics: Num rows: 200 Data size: 1600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out index bd5e284523..993bfd5c1e 100644 --- a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out @@ -100,7 +100,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongScalarEqualLongColumn(val 762, col 3:bigint), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 1:smallint) -> 13:float), FilterDoubleColGreaterDoubleScalar(col 13:double, val -5.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 2:int) -> 13:double)), FilterStringGroupColEqualStringScalar(col 6:string, val a), FilterExprAndExpr(children: FilterDecimalColLessEqualDecimalScalar(col 14:decimal(22,3), val -1.389)(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterStringGroupColNotEqualStringScalar(col 7:string, val a), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 15:decimal(13,3))(children: CastLongToDecimal(col 2:int) -> 15:decimal(13,3)), FilterLongColNotEqualLongColumn(col 11:boolean, col 10:boolean))) - predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (762 = cbigint) or (cstring1 = 'a')) (type: boolean) + predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or 
((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0D) and (cdouble <> UDFToDouble(cint))) or (762L = cbigint) or (cstring1 = 'a')) (type: boolean) Statistics: Num rows: 5465 Data size: 1157230 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float), cdouble (type: double) @@ -155,7 +155,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 68 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (_col0 + -3728.0) (type: double), (- (_col0 + -3728.0)) (type: double), (- (- (_col0 + -3728.0))) (type: double), ((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) * (- (- (_col0 + -3728.0)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175 - _col4) (type: double), (- (10.175 - _col4)) (type: double), ((- _col2) / -563.0) (type: double), _col6 (type: double), (- ((- _col2) / -563.0)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0)) (type: double), (- (_col0 / _col1)) (type: double) + expressions: _col0 (type: double), (_col0 + -3728.0D) (type: double), (- (_col0 + -3728.0D)) (type: double), (- (- (_col0 + -3728.0D))) (type: double), ((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) * (- (- (_col0 + -3728.0D)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0D)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0D)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175D - _col4) (type: double), (- (10.175D - _col4)) (type: double), ((- _col2) / -563.0D) (type: double), _col6 (type: double), (- ((- _col2) / -563.0D)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0D)) (type: double), (- (_col0 / _col1)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -345,7 +345,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 3:bigint, val 197), FilterLongColLessLongColumn(col 2:bigint, col 3:bigint)(children: col 2:int)), FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -26.28), FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 1:smallint) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 0:tinyint) -> 13:float), FilterStringColRegExpStringScalar(col 6:string, pattern .*ss.*)), 
FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 4:float, val 79.5530014038086), FilterStringColLikeStringScalar(col 7:string, pattern 10%))) - predicate: (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean) + predicate: (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cbigint <= 197L) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28D) and (UDFToDouble(csmallint) > cdouble)) or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean) Statistics: Num rows: 6826 Data size: 1131534 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cdouble (type: double) @@ -400,7 +400,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), (UDFToDouble(_col0) / -3728.0) (type: double), (_col0 * -3728) (type: int), _col1 (type: double), (- (_col0 * -3728)) (type: int), _col2 (type: double), (-563 % (_col0 * -3728)) (type: int), (_col1 / _col2) (type: double), (- _col2) (type: double), _col3 (type: double), _col4 (type: double), (_col2 - 10.175) (type: double), _col5 (type: int), (UDFToDouble((_col0 * -3728)) % (_col2 - 10.175)) (type: double), (- _col3) (type: double), _col6 (type: double), (_col3 % -26.28) (type: double), _col7 (type: double), (- (UDFToDouble(_col0) / -3728.0)) (type: double), ((- (_col0 * -3728)) % (-563 % (_col0 * -3728))) (type: int), ((UDFToDouble(_col0) / -3728.0) - _col4) (type: double), (- (_col0 * -3728)) (type: int), _col8 (type: double) + expressions: _col0 (type: int), (UDFToDouble(_col0) / -3728.0D) (type: double), (_col0 * -3728) (type: int), _col1 (type: double), (- (_col0 * -3728)) (type: int), _col2 (type: double), (-563 % (_col0 * -3728)) (type: int), (_col1 / _col2) (type: double), (- _col2) (type: double), _col3 (type: double), _col4 (type: double), (_col2 - 10.175D) (type: double), _col5 (type: int), (UDFToDouble((_col0 * -3728)) % (_col2 - 10.175D)) (type: double), (- _col3) (type: double), _col6 (type: double), (_col3 % -26.28D) (type: double), _col7 (type: double), (- (UDFToDouble(_col0) / -3728.0D)) (type: double), ((- (_col0 * -3728)) % (-563 % (_col0 * -3728))) (type: int), ((UDFToDouble(_col0) / -3728.0D) - _col4) (type: double), (- (_col0 * -3728)) (type: int), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22 Statistics: Num rows: 1 Data size: 156 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -637,7 +637,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (- _col0) (type: double), (_col0 - (- _col0)) (type: double), _col1 (type: bigint), (CAST( _col1 AS decimal(19,0)) % 79.553) (type: decimal(5,3)), _col2 (type: tinyint), (UDFToDouble(_col1) - (- _col0)) (type: double), (- (- _col0)) (type: double), (-1.0 % (- _col0)) (type: double), _col1 (type: bigint), (- _col1) (type: 
bigint), _col3 (type: double), (- (- (- _col0))) (type: double), (762 * (- _col1)) (type: bigint), _col4 (type: int), (UDFToLong(_col2) + (762 * (- _col1))) (type: bigint), ((- _col0) + UDFToDouble(_col4)) (type: double), _col5 (type: double), ((- _col1) % _col1) (type: bigint), _col6 (type: bigint), _col7 (type: double), (-3728 % (UDFToLong(_col2) + (762 * (- _col1)))) (type: bigint) + expressions: _col0 (type: double), (- _col0) (type: double), (_col0 - (- _col0)) (type: double), _col1 (type: bigint), (CAST( _col1 AS decimal(19,0)) % 79.553) (type: decimal(5,3)), _col2 (type: tinyint), (UDFToDouble(_col1) - (- _col0)) (type: double), (- (- _col0)) (type: double), (-1.0D % (- _col0)) (type: double), _col1 (type: bigint), (- _col1) (type: bigint), _col3 (type: double), (- (- (- _col0))) (type: double), (762L * (- _col1)) (type: bigint), _col4 (type: int), (UDFToLong(_col2) + (762L * (- _col1))) (type: bigint), ((- _col0) + UDFToDouble(_col4)) (type: double), _col5 (type: double), ((- _col1) % _col1) (type: bigint), _col6 (type: bigint), _col7 (type: double), (-3728L % (UDFToLong(_col2) + (762L * (- _col1)))) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -798,7 +798,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessEqualTimestampColumn(col 9:timestamp, col 8:timestamp), FilterDoubleColNotEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterStringScalarLessEqualStringGroupColumn(val ss, col 6:string)), FilterExprAndExpr(children: FilterLongColLessLongColumn(col 1:smallint, col 0:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double)), FilterDoubleColEqualDoubleScalar(col 4:float, val 17.0)) - predicate: (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or ((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or (cfloat = 17)) (type: boolean) + predicate: (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0D)) or ((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or (cfloat = 17)) (type: boolean) Statistics: Num rows: 2824 Data size: 491654 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cbigint (type: bigint), cfloat (type: float) @@ -853,7 +853,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), (_col0 + 6981.0) (type: double), ((_col0 + 6981.0) + _col0) (type: double), _col1 (type: bigint), (((_col0 + 6981.0) + _col0) / _col0) (type: double), (- (_col0 + 6981.0)) (type: double), _col2 (type: double), (_col0 % (- (_col0 + 6981.0))) (type: double), _col3 (type: double), _col4 (type: double), (- _col1) (type: bigint), (UDFToDouble((- _col1)) / _col2) (type: double), _col5 (type: float), (_col4 * -26.28) (type: double) + expressions: _col0 (type: double), (_col0 + 6981.0D) (type: double), ((_col0 + 6981.0D) + _col0) 
(type: double), _col1 (type: bigint), (((_col0 + 6981.0D) + _col0) / _col0) (type: double), (- (_col0 + 6981.0D)) (type: double), _col2 (type: double), (_col0 % (- (_col0 + 6981.0D))) (type: double), _col3 (type: double), _col4 (type: double), (- _col1) (type: bigint), (UDFToDouble((- _col1)) / _col2) (type: double), _col5 (type: float), (_col4 * -26.28D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -1025,7 +1025,7 @@ STAGE PLANS: predicate: (((1 <> cboolean2) and (CAST( csmallint AS decimal(8,3)) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint)) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or (cstring1 regexp 'a.*' and (cstring2 like '%ss%'))) (type: boolean) Statistics: Num rows: 9898 Data size: 2462086 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - CAST( cint AS decimal(10,0))) (type: decimal(14,3)), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - CAST( cint AS decimal(10,0))) - -26.28) (type: decimal(15,3)), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / CAST( ctinyint AS decimal(3,0))) (type: decimal(9,7)) + expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728L * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - CAST( cint AS decimal(10,0))) (type: decimal(14,3)), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - CAST( cint AS decimal(10,0))) - -26.28) (type: decimal(15,3)), (- cfloat) (type: float), (cdouble * -89010.0D) (type: double), (UDFToDouble(ctinyint) / 988888.0D) (type: double), (- ctinyint) (type: tinyint), (79.553 / CAST( ctinyint AS decimal(3,0))) (type: decimal(9,7)) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22 Select Vectorization: className: VectorSelectOperator @@ -1319,10 +1319,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongScalarGreaterLongColumn(val 197, col 0:int)(children: col 0:tinyint), FilterLongColEqualLongColumn(col 2:bigint, col 3:bigint)(children: col 2:int)), FilterLongColEqualLongScalar(col 3:bigint, val 359), FilterLongColLessLongScalar(col 10:boolean, val 0), FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 
6:string, pattern %ss), FilterDoubleColLessEqualDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 0:tinyint) -> 13:float))) - predicate: (((197 > UDFToInteger(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint))) or (cbigint = 359) or (cboolean1 < 0)) (type: boolean) + predicate: (((197 > UDFToInteger(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint))) or (cbigint = 359L) or (cboolean1 < 0)) (type: boolean) Statistics: Num rows: 8194 Data size: 1734900 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (CAST( cbigint AS decimal(19,0)) % 79.553) (type: decimal(5,3)), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % cfloat) (type: float), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint) + expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (CAST( cbigint AS decimal(19,0)) % 79.553) (type: decimal(5,3)), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % cfloat) (type: float), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569L % cbigint) (type: bigint), (359.0D - cdouble) (type: double), (- csmallint) (type: smallint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24 Select Vectorization: className: VectorSelectOperator @@ -1872,7 +1872,7 @@ STAGE PLANS: predicate: (((-1.389 >= CAST( cint AS decimal(13,3))) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > CAST( cbigint AS decimal(22,3))))) (type: boolean) Statistics: Num rows: 3868 Data size: 795962 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), 
(UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double) + expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0D) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175D) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Select Vectorization: className: VectorSelectOperator @@ -2175,7 +2175,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1141 Data size: 39924 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) % -75) (type: int), _col1 (type: double), (-1.389 / CAST( _col0 AS decimal(5,0))) (type: decimal(10,9)), _col2 (type: bigint), (UDFToDouble((UDFToInteger(_col0) % -75)) / UDFToDouble(_col2)) (type: double), (- (UDFToInteger(_col0) % -75)) (type: int), _col3 (type: double), (- (- (UDFToInteger(_col0) % -75))) (type: int), _col4 (type: bigint), (_col4 - -89010) (type: bigint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) % -75) (type: int), _col1 (type: double), (-1.389 / CAST( _col0 AS decimal(5,0))) (type: decimal(10,9)), _col2 (type: bigint), (UDFToDouble((UDFToInteger(_col0) % -75)) / UDFToDouble(_col2)) (type: double), (- (UDFToInteger(_col0) % -75)) (type: int), _col3 (type: double), (- (- (UDFToInteger(_col0) % -75))) (type: int), _col4 (type: bigint), (_col4 - -89010L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1141 Data size: 199664 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator @@ -2373,7 +2373,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 2563.58), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColGreaterEqualLongColumn(col 3:bigint, col 2:bigint)(children: col 2:int), FilterLongColLessLongColumn(col 1:int, col 2:int)(children: col 1:smallint), FilterDoubleColLessDoubleScalar(col 4:float, val -5638.14990234375)), FilterDecimalScalarEqualDecimalColumn(val 2563.58, col 13:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 13:decimal(6,2)), FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 5:double, col 14:double)(children: CastLongToDouble(col 3:bigint) -> 14:double), FilterDecimalScalarGreaterDecimalColumn(val -5638.15, col 15:decimal(21,2))(children: CastLongToDecimal(col 3:bigint) -> 15:decimal(21,2))))) - predicate: ((((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (cfloat < -5638.15)) or (2563.58 = CAST( ctinyint AS decimal(6,2))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > CAST( cbigint AS decimal(21,2))))) and (cdouble > 2563.58)) (type: boolean) + predicate: ((((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (cfloat < -5638.15)) or 
(2563.58 = CAST( ctinyint AS decimal(6,2))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > CAST( cbigint AS decimal(21,2))))) and (cdouble > 2563.58D)) (type: boolean) Statistics: Num rows: 2503 Data size: 59820 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: cfloat (type: float), cdouble (type: double) @@ -2433,7 +2433,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1136 Data size: 61320 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: double), _col1 (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double) + expressions: _col0 (type: double), _col1 (type: double), (2563.58D * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58D * _col1) + -5638.15D) (type: double), ((- _col1) * ((2563.58D * _col1) + -5638.15D)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0D) (type: double), _col6 (type: double), (-863.257D % (_col0 * 762.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Statistics: Num rows: 1136 Data size: 143112 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator @@ -2675,7 +2675,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColNotEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -257, col 0:int)(children: col 0:tinyint), SelectColumnIsNotNull(col 11:boolean), FilterStringColRegExpStringScalar(col 6:string, pattern .*ss), FilterDoubleScalarLessDoubleColumn(val -3.0, col 13:double)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double)), FilterDoubleColEqualDoubleScalar(col 13:double, val -5.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterStringColLikeStringScalar(col 7:string, pattern %b%)), FilterDoubleColEqualDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterExprAndExpr(children: SelectColumnIsNull(col 10:boolean), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float)))) - predicate: ((((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))) and (UDFToDouble(ctimestamp1) <> 0.0)) (type: boolean) + predicate: ((((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0D < UDFToDouble(ctimestamp1))) or 
(UDFToDouble(ctimestamp2) = -5.0D) or ((UDFToDouble(ctimestamp1) < 0.0D) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))) and (UDFToDouble(ctimestamp1) <> 0.0D)) (type: boolean) Statistics: Num rows: 12288 Data size: 3019778 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -2735,7 +2735,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Statistics: Num rows: 3072 Data size: 645716 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / CAST( (- _col5) AS decimal(3,0))) (type: decimal(8,6)), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double) + expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175D) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28D - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28D - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175D)) (type: double), _col6 (type: double), (_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175D / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175D))) (type: double), _col10 (type: double), (((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- 
_col4)))) * 10.175D) (type: double), (10.175D % (10.175D / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28D - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / CAST( (- _col5) AS decimal(3,0))) (type: decimal(8,6)), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38 Statistics: Num rows: 3072 Data size: 1542740 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator @@ -3118,7 +3118,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 3 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: boolean), _col1 (type: float), (- _col1) (type: float), (-26.28 / UDFToDouble(_col1)) (type: double), _col2 (type: bigint), (CAST( _col2 AS decimal(19,0)) - 10.175) (type: decimal(23,3)), _col3 (type: double), (_col3 % UDFToDouble(_col1)) (type: double), (10.175 + (- _col1)) (type: float), _col4 (type: double), (UDFToDouble((CAST( _col2 AS decimal(19,0)) - 10.175)) + _col3) (type: double), _col5 (type: bigint), _col6 (type: double), (- (10.175 + (- _col1))) (type: float), (79.553 / _col6) (type: double), (_col3 % (79.553 / _col6)) (type: double), _col7 (type: bigint), _col8 (type: double), (-1.389 * CAST( _col5 AS decimal(19,0))) (type: decimal(24,3)), (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0)))) (type: decimal(25,3)), _col9 (type: double), (- (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0))))) (type: decimal(25,3)), _col10 (type: double), (- _col10) (type: double), (_col10 * UDFToDouble(_col7)) (type: double) + expressions: _col0 (type: boolean), _col1 (type: float), (- _col1) (type: float), (-26.28D / UDFToDouble(_col1)) (type: double), _col2 (type: bigint), (CAST( _col2 AS decimal(19,0)) - 10.175) (type: decimal(23,3)), _col3 (type: double), (_col3 % UDFToDouble(_col1)) (type: double), (10.175 + (- _col1)) (type: float), _col4 (type: double), (UDFToDouble((CAST( _col2 AS decimal(19,0)) - 10.175)) + _col3) (type: double), _col5 (type: bigint), _col6 (type: double), (- (10.175 + (- _col1))) (type: float), (79.553D / _col6) (type: double), (_col3 % (79.553D / _col6)) (type: double), _col7 (type: bigint), _col8 (type: double), (-1.389 * CAST( _col5 AS decimal(19,0))) (type: decimal(24,3)), (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0)))) (type: decimal(25,3)), _col9 (type: double), (- (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0))))) (type: decimal(25,3)), _col10 (type: double), (- _col10) (type: double), (_col10 * UDFToDouble(_col7)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25 Statistics: Num rows: 3 Data size: 1800 Basic stats: COMPLETE 
Column stats: COMPLETE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out index ed17e5c5f1..9143816255 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out @@ -60,10 +60,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1:smallint, val 418), FilterLongColEqualLongScalar(col 1:smallint, val 12205), FilterLongColEqualLongScalar(col 1:smallint, val 10583)) - predicate: ((csmallint = 10583) or (csmallint = 12205) or (csmallint = 418)) (type: boolean) + predicate: ((csmallint = 10583S) or (csmallint = 12205S) or (csmallint = 418S)) (type: boolean) Statistics: Num rows: 6 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string) + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -209,10 +209,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1:smallint, val 418), FilterLongColEqualLongScalar(col 1:smallint, val 12205), FilterLongColEqualLongScalar(col 1:smallint, val 10583)) - predicate: ((csmallint = 10583) or (csmallint = 12205) or (csmallint = 418)) (type: boolean) + predicate: ((csmallint = 10583S) or (csmallint = 12205S) or (csmallint = 418S)) (type: boolean) Statistics: Num rows: 6 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN (null) ELSE ('c') END (type: string) + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN (null) ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -849,7 +849,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE ((attr + 2)) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN ((attr + 1L)) ELSE ((attr + 2L)) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -930,7 +930,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN (null) ELSE ((attr + 2)) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN (null) ELSE ((attr + 2L)) END (type: bigint) outputColumnNames: _col0 Select 
Vectorization: className: VectorSelectOperator @@ -1011,7 +1011,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (null) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN ((attr + 1L)) ELSE (null) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out b/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out index 84b4d9454d..cc4e090b8d 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out @@ -177,7 +177,7 @@ STAGE PLANS: predicate: ((cbigint % 250) = 0) (type: boolean) Statistics: Num rows: 6144 Data size: 842180 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: UDFToBoolean(ctinyint) (type: boolean), UDFToBoolean(csmallint) (type: boolean), UDFToBoolean(cint) (type: boolean), UDFToBoolean(cbigint) (type: boolean), UDFToBoolean(cfloat) (type: boolean), UDFToBoolean(cdouble) (type: boolean), cboolean1 (type: boolean), UDFToBoolean((cbigint * 0)) (type: boolean), UDFToBoolean(ctimestamp1) (type: boolean), UDFToBoolean(cstring1) (type: boolean), UDFToInteger(ctinyint) (type: int), UDFToInteger(csmallint) (type: int), cint (type: int), UDFToInteger(cbigint) (type: int), UDFToInteger(cfloat) (type: int), UDFToInteger(cdouble) (type: int), UDFToInteger(cboolean1) (type: int), UDFToInteger(ctimestamp1) (type: int), UDFToInteger(cstring1) (type: int), UDFToInteger(substr(cstring1, 1, 1)) (type: int), UDFToByte(cfloat) (type: tinyint), UDFToShort(cfloat) (type: smallint), UDFToLong(cfloat) (type: bigint), UDFToDouble(ctinyint) (type: double), UDFToDouble(csmallint) (type: double), UDFToDouble(cint) (type: double), UDFToDouble(cbigint) (type: double), UDFToDouble(cfloat) (type: double), cdouble (type: double), UDFToDouble(cboolean1) (type: double), UDFToDouble(ctimestamp1) (type: double), UDFToDouble(cstring1) (type: double), UDFToDouble(substr(cstring1, 1, 1)) (type: double), UDFToFloat(cint) (type: float), UDFToFloat(cdouble) (type: float), CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), CAST( CAST( ctimestamp1 AS DATE) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp), UDFToString(ctinyint) (type: string), UDFToString(csmallint) (type: string), UDFToString(cint) (type: string), UDFToString(cbigint) (type: string), UDFToString(cfloat) (type: string), UDFToString(cdouble) (type: string), UDFToString(cboolean1) (type: string), UDFToString((cbigint * 0)) (type: string), UDFToString(ctimestamp1) (type: string), cstring1 (type: string), UDFToString(CAST( cstring1 AS CHAR(10)) (type: string), UDFToString(CAST( cstring1 AS varchar(10))) (type: string), UDFToFloat(UDFToInteger(cfloat)) (type: float), UDFToDouble((cint * 2)) (type: double), UDFToString(sin(cfloat)) (type: string), (UDFToDouble(UDFToFloat(cint)) + UDFToDouble(cboolean1)) (type: double) + expressions: 
UDFToBoolean(ctinyint) (type: boolean), UDFToBoolean(csmallint) (type: boolean), UDFToBoolean(cint) (type: boolean), UDFToBoolean(cbigint) (type: boolean), UDFToBoolean(cfloat) (type: boolean), UDFToBoolean(cdouble) (type: boolean), cboolean1 (type: boolean), UDFToBoolean((cbigint * 0L)) (type: boolean), UDFToBoolean(ctimestamp1) (type: boolean), UDFToBoolean(cstring1) (type: boolean), UDFToInteger(ctinyint) (type: int), UDFToInteger(csmallint) (type: int), cint (type: int), UDFToInteger(cbigint) (type: int), UDFToInteger(cfloat) (type: int), UDFToInteger(cdouble) (type: int), UDFToInteger(cboolean1) (type: int), UDFToInteger(ctimestamp1) (type: int), UDFToInteger(cstring1) (type: int), UDFToInteger(substr(cstring1, 1, 1)) (type: int), UDFToByte(cfloat) (type: tinyint), UDFToShort(cfloat) (type: smallint), UDFToLong(cfloat) (type: bigint), UDFToDouble(ctinyint) (type: double), UDFToDouble(csmallint) (type: double), UDFToDouble(cint) (type: double), UDFToDouble(cbigint) (type: double), UDFToDouble(cfloat) (type: double), cdouble (type: double), UDFToDouble(cboolean1) (type: double), UDFToDouble(ctimestamp1) (type: double), UDFToDouble(cstring1) (type: double), UDFToDouble(substr(cstring1, 1, 1)) (type: double), UDFToFloat(cint) (type: float), UDFToFloat(cdouble) (type: float), CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), CAST( CAST( ctimestamp1 AS DATE) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp), UDFToString(ctinyint) (type: string), UDFToString(csmallint) (type: string), UDFToString(cint) (type: string), UDFToString(cbigint) (type: string), UDFToString(cfloat) (type: string), UDFToString(cdouble) (type: string), UDFToString(cboolean1) (type: string), UDFToString((cbigint * 0L)) (type: string), UDFToString(ctimestamp1) (type: string), cstring1 (type: string), UDFToString(CAST( cstring1 AS CHAR(10)) (type: string), UDFToString(CAST( cstring1 AS varchar(10))) (type: string), UDFToFloat(UDFToInteger(cfloat)) (type: float), UDFToDouble((cint * 2)) (type: double), UDFToString(sin(cfloat)) (type: string), (UDFToDouble(UDFToFloat(cint)) + UDFToDouble(cboolean1)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55, _col56, _col57, _col58, _col59, _col60, _col61, _col62 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out index db292508ac..8351192513 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out @@ -265,7 +265,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select 
Operator - expressions: fl_time (type: timestamp), to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), dayofweek(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: date), date_add(fl_time, 2) (type: date), date_sub(fl_time, 2) (type: date), datediff(fl_time, '2000-01-01') (type: int), datediff(fl_time, 2000-01-01) (type: int), datediff(fl_time, 2000-01-01 00:00:00.0) (type: int), datediff(fl_time, 2000-01-01 11:13:09.0) (type: int), datediff(fl_time, '2007-03-14') (type: int), datediff(fl_time, 2007-03-14) (type: int), datediff(fl_time, 2007-03-14 00:00:00.0) (type: int), datediff(fl_time, 2007-03-14 08:21:59.0) (type: int) + expressions: fl_time (type: timestamp), to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), dayofweek(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: date), date_add(fl_time, 2) (type: date), date_sub(fl_time, 2) (type: date), datediff(fl_time, '2000-01-01') (type: int), datediff(fl_time, DATE'2000-01-01') (type: int), datediff(fl_time, TIMESTAMP'2000-01-01 00:00:00.0') (type: int), datediff(fl_time, TIMESTAMP'2000-01-01 11:13:09.0') (type: int), datediff(fl_time, '2007-03-14') (type: int), datediff(fl_time, DATE'2007-03-14') (type: int), datediff(fl_time, TIMESTAMP'2007-03-14 00:00:00.0') (type: int), datediff(fl_time, TIMESTAMP'2007-03-14 08:21:59.0') (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 Select Vectorization: className: VectorSelectOperator @@ -557,7 +557,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: fl_date (type: date), to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), dayofweek(fl_date) (type: int), weekofyear(fl_date) (type: int), fl_date (type: date), to_date(fl_date) (type: date), date_add(fl_date, 2) (type: date), date_sub(fl_date, 2) (type: date), datediff(fl_date, '2000-01-01') (type: int), datediff(fl_date, 2000-01-01) (type: int), datediff(fl_date, 2000-01-01 00:00:00.0) (type: int), datediff(fl_date, 2000-01-01 11:13:09.0) (type: int), datediff(fl_date, '2007-03-14') (type: int), datediff(fl_date, 2007-03-14) (type: int), datediff(fl_date, 2007-03-14 00:00:00.0) (type: int), datediff(fl_date, 2007-03-14 08:21:59.0) (type: int) + expressions: fl_date (type: date), to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), dayofweek(fl_date) (type: int), weekofyear(fl_date) (type: int), fl_date (type: date), to_date(fl_date) (type: date), date_add(fl_date, 2) (type: date), date_sub(fl_date, 2) (type: date), datediff(fl_date, '2000-01-01') (type: int), datediff(fl_date, DATE'2000-01-01') (type: int), datediff(fl_date, TIMESTAMP'2000-01-01 00:00:00.0') (type: int), datediff(fl_date, TIMESTAMP'2000-01-01 11:13:09.0') (type: int), datediff(fl_date, '2007-03-14') (type: int), datediff(fl_date, DATE'2007-03-14') (type: int), datediff(fl_date, TIMESTAMP'2007-03-14 00:00:00.0') (type: int), datediff(fl_date, TIMESTAMP'2007-03-14 08:21:59.0') 
(type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 Select Vectorization: className: VectorSelectOperator @@ -853,7 +853,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: fl_time (type: timestamp), fl_date (type: date), (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (dayofweek(fl_time) = dayofweek(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = fl_date) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), (date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean), (datediff(fl_time, 2000-01-01) = datediff(fl_date, 2000-01-01)) (type: boolean), (datediff(fl_time, 2000-01-01 00:00:00.0) = datediff(fl_date, 2000-01-01 00:00:00.0)) (type: boolean), (datediff(fl_time, 2000-01-01 11:13:09.0) = datediff(fl_date, 2000-01-01 11:13:09.0)) (type: boolean), (datediff(fl_time, '2007-03-14') = datediff(fl_date, '2007-03-14')) (type: boolean), (datediff(fl_time, 2007-03-14) = datediff(fl_date, 2007-03-14)) (type: boolean), (datediff(fl_time, 2007-03-14 00:00:00.0) = datediff(fl_date, 2007-03-14 00:00:00.0)) (type: boolean), (datediff(fl_time, 2007-03-14 08:21:59.0) = datediff(fl_date, 2007-03-14 08:21:59.0)) (type: boolean), (datediff(fl_date, '2000-01-01') = datediff(fl_date, 2000-01-01)) (type: boolean), (datediff(fl_date, '2007-03-14') = datediff(fl_date, 2007-03-14)) (type: boolean) + expressions: fl_time (type: timestamp), fl_date (type: date), (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (dayofweek(fl_time) = dayofweek(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = fl_date) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), (date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean), (datediff(fl_time, DATE'2000-01-01') = datediff(fl_date, DATE'2000-01-01')) (type: boolean), (datediff(fl_time, TIMESTAMP'2000-01-01 00:00:00.0') = datediff(fl_date, TIMESTAMP'2000-01-01 00:00:00.0')) (type: boolean), (datediff(fl_time, TIMESTAMP'2000-01-01 11:13:09.0') = datediff(fl_date, TIMESTAMP'2000-01-01 11:13:09.0')) (type: boolean), (datediff(fl_time, '2007-03-14') = datediff(fl_date, '2007-03-14')) (type: boolean), (datediff(fl_time, DATE'2007-03-14') = datediff(fl_date, DATE'2007-03-14')) (type: boolean), (datediff(fl_time, TIMESTAMP'2007-03-14 00:00:00.0') = datediff(fl_date, TIMESTAMP'2007-03-14 00:00:00.0')) (type: boolean), (datediff(fl_time, TIMESTAMP'2007-03-14 08:21:59.0') = datediff(fl_date, TIMESTAMP'2007-03-14 08:21:59.0')) (type: boolean), (datediff(fl_date, '2000-01-01') = datediff(fl_date, DATE'2000-01-01')) (type: boolean), (datediff(fl_date, '2007-03-14') = datediff(fl_date, DATE'2007-03-14')) (type: boolean) outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out index 22f2894b8f..15b62c9781 100644 --- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out @@ -632,10 +632,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -843,10 +843,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -1016,10 +1016,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1189,10 +1189,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1653,22 +1653,22 @@ STAGE PLANS: Map Operator Tree: TableScan 
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index 22f2894b8f..15b62c9781 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -632,10 +632,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -843,10 +843,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -1016,10 +1016,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_date_hour
-                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
+                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean)
                Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
                  Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -1189,10 +1189,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_date_hour
-                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
+                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean)
                Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
                  Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -1653,22 +1653,22 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
                    outputColumnNames: _col0
                    Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      outputColumnNames: _col0
                      Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
@@ -1701,7 +1701,7 @@
                  Inner Join 0 to 1
                keys:
                  0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count()
@@ -1791,9 +1791,9 @@ STAGE PLANS:
                    outputColumnNames: _col0
                    Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0D) (type: double)
                      sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double)
                      Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: no inputs
@@ -1811,10 +1811,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
@@ -1837,7 +1837,7 @@ STAGE PLANS:
                          Dynamic Partitioning Event Operator
                            Target column: hr (string)
                            Target Input: srcpart
-                            Partition key expr: (UDFToDouble(hr) * 2.0)
+                            Partition key expr: (UDFToDouble(hr) * 2.0D)
                            Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                            Target Vertex: Map 1
            Execution mode: vectorized, llap
@@ -1858,7 +1858,7 @@ STAGE PLANS:
                condition map:
                     Inner Join 0 to 1
                keys:
-                  0 (UDFToDouble(_col0) * 2.0) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0D) (type: double)
                  1 _col0 (type: double)
                Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
@@ -1969,19 +1969,19 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
                    outputColumnNames: _col0
                    Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
            Execution mode: vectorized, llap
            LLAP IO: all inputs
@@ -2002,7 +2002,7 @@ STAGE PLANS:
                  Inner Join 0 to 1
                keys:
                  0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count()
@@ -2092,9 +2092,9 @@ STAGE PLANS:
                    outputColumnNames: _col0
                    Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0D) (type: double)
                      sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double)
                      Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: no inputs
@@ -2112,10 +2112,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
@@ -2144,7 +2144,7 @@ STAGE PLANS:
                condition map:
                     Inner Join 0 to 1
                keys:
-                  0 (UDFToDouble(_col0) * 2.0) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0D) (type: double)
                  1 _col0 (type: double)
                Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
@@ -2248,9 +2248,9 @@ STAGE PLANS:
                    outputColumnNames: _col0
                    Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
-                      key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string)
                      sort order: +
-                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string)
                      Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
            Execution mode: vectorized, llap
            LLAP IO: no inputs
@@ -2268,10 +2268,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
@@ -2294,7 +2294,7 @@ STAGE PLANS:
                          Dynamic Partitioning Event Operator
                            Target column: hr (string)
                            Target Input: srcpart
-                            Partition key expr: UDFToString((UDFToDouble(hr) * 2.0))
+                            Partition key expr: UDFToString((UDFToDouble(hr) * 2.0D))
                            Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                            Target Vertex: Map 1
            Execution mode: vectorized, llap
@@ -2315,7 +2315,7 @@ STAGE PLANS:
                condition map:
                     Inner Join 0 to 1
                keys:
-                  0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
+                  0 UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string)
                  1 UDFToString(_col0) (type: string)
                Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
@@ -2587,10 +2587,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_date_hour
-                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
+                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D)) (type: boolean)
                Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08')) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08')) (type: boolean)
                  Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -2733,10 +2733,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_date_hour
-                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
+                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean)
                Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
                  Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -3368,10 +3368,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean)
                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -3518,10 +3518,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean)
+                filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean)
                Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: (UDFToDouble(hr) = 13.0) (type: boolean)
+                  predicate: (UDFToDouble(hr) = 13.0D) (type: boolean)
                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -3547,10 +3547,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart
-                filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean)
                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
                Filter Operator
-                  predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -4827,10 +4827,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -4993,10 +4993,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_date_hour
-                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
+                filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean)
                Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean)
                  Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -5294,7 +5294,7 @@ STAGE PLANS:
                  Inner Join 0 to 1
                keys:
                  0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                input vertices:
                  1 Map 3
                Statistics: Num rows: 2200 Data size: 404800 Basic stats: COMPLETE Column stats: NONE
@@ -5323,22 +5323,22 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
                    outputColumnNames: _col0
                    Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
-                      expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                      expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double)
                      outputColumnNames: _col0
                      Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
@@ -5445,7 +5445,7 @@ STAGE PLANS:
                condition map:
                     Inner Join 0 to 1
                keys:
-                  0 (UDFToDouble(_col0) * 2.0) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0D) (type: double)
                  1 _col0 (type: double)
                input vertices:
                  1 Map 3
@@ -5475,10 +5475,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_double_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                Statistics: Num rows: 2 Data size: 384 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: double)
@@ -5501,7 +5501,7 @@ STAGE PLANS:
                          Dynamic Partitioning Event Operator
                            Target column: hr (string)
                            Target Input: srcpart
-                            Partition key expr: (UDFToDouble(hr) * 2.0)
+                            Partition key expr: (UDFToDouble(hr) * 2.0D)
                            Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
                            Target Vertex: Map 1
            Execution mode: vectorized, llap
@@ -6211,10 +6211,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean)
+                filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean)
                Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean)
+                  predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -6326,10 +6326,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_hour
-                filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean)
+                filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean)
                Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: (UDFToDouble(hr) = 13.0) (type: boolean)
+                  predicate: (UDFToDouble(hr) = 13.0D) (type: boolean)
                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: hr (type: string)
@@ -6365,10 +6365,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart
-                filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean)
+                filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean)
                Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
                Filter Operator
-                  predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean)
+                  predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean)
                  Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
@@ -6820,10 +6820,10 @@ STAGE PLANS:
          Map Operator Tree:
              TableScan
                alias: srcpart_date_hour
-                filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
+                filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean)
                Statistics: Num rows: 4 Data size: 2944 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
-                  predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
+                  predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean)
                  Statistics: Num rows: 2 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: ds (type: string), hr (type: string)
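Every hunk in vectorized_dynamic_partition_pruning.q.out makes the same mechanical change: double constants in plan expressions now carry Hive's typed-literal suffix D (11.0 becomes 11.0D, and so on). The UDFToDouble casts come from Hive's comparison rules: hour and hr are string columns compared against integer constants, so both sides are promoted to double. A minimal sketch reusing the table and column names visible in the hunks above (the exact test queries are longer):

    EXPLAIN SELECT hr FROM srcpart_hour WHERE hour = 11;
    -- 'hour' is a string column, so the filter is planned as
    --   (UDFToDouble(hour) = 11.0D)
    -- both sides promoted to double, with the literal printed in typed form.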
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
index 36f1bbf12f..35786eb8a0 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
@@ -127,10 +127,10 @@ STAGE PLANS:
                      className: VectorFilterOperator
                      native: true
                      predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 13:bigint, val 0)(children: LongColModuloLongScalar(col 3:bigint, val 500) -> 13:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 14:double, val -1.0)(children: FuncSinDoubleToDouble(col 4:float) -> 14:double))
-                predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean)
+                predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0D)) (type: boolean)
                Statistics: Num rows: 2048 Data size: 48960 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
-                  expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) (type: double)
+                  expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0D)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159D)) (type: double)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40
                  Select Vectorization:
                      className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
index 9652d36b20..9645a32de0 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
@@ -273,7 +273,7 @@ STAGE PLANS:
                      className: VectorFilterOperator
                      native: true
                      predicateExpression: FilterTimestampColumnInList(col 0:timestamp, values [0001-01-01 00:00:00.0, 0002-02-02 00:00:00.0])
-                predicate: (ts) IN (0001-01-01 00:00:00.0, 0002-02-02 00:00:00.0) (type: boolean)
+                predicate: (ts) IN (TIMESTAMP'0001-01-01 00:00:00.0', TIMESTAMP'0002-02-02 00:00:00.0') (type: boolean)
                Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: ts (type: timestamp)
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
index cefa34909b..c88ba5d67d 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
@@ -1115,7 +1115,7 @@ STAGE PLANS:
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
-                  expressions: round(_col0, 0) (type: double), _col1 BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 (type: boolean), _col2 BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 (type: boolean), _col3 BETWEEN 9.20684592523616E19 AND 9.20684592523617E19 (type: boolean), round(_col4, 3) (type: double), round(_col5, 3) (type: double), round(_col6, 3) (type: double), round(_col7, 3) (type: double)
+                  expressions: round(_col0, 0) (type: double), _col1 BETWEEN 8.97077295279421E19D AND 8.97077295279422E19D (type: boolean), _col2 BETWEEN 8.97077295279421E19D AND 8.97077295279422E19D (type: boolean), _col3 BETWEEN 9.20684592523616E19D AND 9.20684592523617E19D (type: boolean), round(_col4, 3) (type: double), round(_col5, 3) (type: double), round(_col6, 3) (type: double), round(_col7, 3) (type: double)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                  Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
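Doubles written in scientific notation get the same suffix: 8.97077295279421E19 becomes 8.97077295279421E19D. A hedged sketch of the BETWEEN shape (the table and column names here are hypothetical, chosen only for illustration):

    EXPLAIN SELECT sum_col BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 FROM some_stats;
    -- planned as: sum_col BETWEEN 8.97077295279421E19D AND 8.97077295279422E19D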
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
index c346ff2cf8..04cb482873 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                predicate: ((cbigint % 250) = 0) (type: boolean)
                Statistics: Num rows: 6144 Data size: 842180 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
-                  expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
+                  expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                  Select Vectorization:
                      className: VectorSelectOperator
@@ -226,7 +226,7 @@ STAGE PLANS:
                predicate: ((cbigint % 250) = 0) (type: boolean)
                Statistics: Num rows: 6144 Data size: 842180 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
-                  expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
+                  expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                  Select Vectorization:
                      className: VectorSelectOperator
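Bigint constants get the analogous L suffix. In CAST( (cbigint * 0) AS TIMESTAMP) the integer literal 0 is widened to bigint to match cbigint, and the plan now prints it as 0L. A sketch against the test table these column names suggest (presumably alltypesorc; the exact source query is an assumption):

    EXPLAIN SELECT CAST(cbigint * 0 AS TIMESTAMP) FROM alltypesorc WHERE cbigint % 250 = 0;
    -- the multiplication is planned as (cbigint * 0L)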
diff --git a/ql/src/test/results/clientpositive/load_dyn_part13.q.out b/ql/src/test/results/clientpositive/load_dyn_part13.q.out
index ce22104c95..89aa84f933 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part13.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part13.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string), '22' (type: string)
@@ -104,7 +104,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: ((UDFToDouble(key) < 40.0) and (UDFToDouble(key) > 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 40.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string), '33' (type: string)
diff --git a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
index cb4a95758d..95a5c1aa55 100644
--- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -51,7 +51,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -324,7 +324,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -345,7 +345,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -622,7 +622,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -643,7 +643,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -916,7 +916,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -937,7 +937,7 @@ STAGE PLANS:
            GatherStats: false
            Filter Operator
              isSamplingPred: false
-              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/macro.q.out b/ql/src/test/results/clientpositive/macro.q.out
index 35e03caac4..a40d88871c 100644
--- a/ql/src/test/results/clientpositive/macro.q.out
+++ b/ql/src/test/results/clientpositive/macro.q.out
@@ -29,7 +29,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
            Select Operator
-              expressions: 0.8807970779778823 (type: double)
+              expressions: 0.8807970779778823D (type: double)
              outputColumnNames: _col0
              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
              Limit
@@ -54,7 +54,7 @@ STAGE PLANS:
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
            GatherStats: false
            Select Operator
-              expressions: 0.8807970779778823 (type: double)
+              expressions: 0.8807970779778823D (type: double)
              outputColumnNames: _col0
              Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
              Limit
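The macro.q.out hunks show that constants produced by compile-time folding are printed the same way. The value 0.8807970779778823 is consistent with a sigmoid macro evaluated at x = 2, i.e. 1.0 / (1.0 + exp(-2)); the macro body below is an assumption reconstructed from that constant, not copied from the test:

    CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x));
    EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1;
    -- the optimizer folds SIGMOID(2) to a double constant and now renders
    -- it as the typed literal 0.8807970779778823D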
diff --git a/ql/src/test/results/clientpositive/mapjoin1.q.out b/ql/src/test/results/clientpositive/mapjoin1.q.out
index 06ba0ee4c2..b521f62caf 100644
--- a/ql/src/test/results/clientpositive/mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/mapjoin1.q.out
@@ -138,7 +138,7 @@ STAGE PLANS:
            HashTable Sink Operator
              filter predicates:
                0 
-                1 {((UDFToDouble(_col0) * 10.0) < 1000.0)}
+                1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -158,7 +158,7 @@ STAGE PLANS:
                Right Outer Join 0 to 1
              filter predicates:
                0 
-                1 {((UDFToDouble(_col0) * 10.0) < 1000.0)}
+                1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -233,7 +233,7 @@ STAGE PLANS:
            HashTable Sink Operator
              filter predicates:
                0 
-                1 {(UDFToDouble(_col1.key) > 200.0)}
+                1 {(UDFToDouble(_col1.key) > 200.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -253,7 +253,7 @@ STAGE PLANS:
                Right Outer Join 0 to 1
              filter predicates:
                0 
-                1 {(UDFToDouble(_col1.key) > 200.0)}
+                1 {(UDFToDouble(_col1.key) > 200.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -415,7 +415,7 @@ STAGE PLANS:
            HashTable Sink Operator
              filter predicates:
                0 
-                1 {((UDFToDouble(_col0) * 10.0) < 1000.0)}
+                1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -435,7 +435,7 @@ STAGE PLANS:
                Right Outer Join 0 to 1
              filter predicates:
                0 
-                1 {((UDFToDouble(_col0) * 10.0) < 1000.0)}
+                1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -510,7 +510,7 @@ STAGE PLANS:
            HashTable Sink Operator
              filter predicates:
                0 
-                1 {(UDFToDouble(_col1.key) > 200.0)}
+                1 {(UDFToDouble(_col1.key) > 200.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
@@ -530,7 +530,7 @@ STAGE PLANS:
                Right Outer Join 0 to 1
              filter predicates:
                0 
-                1 {(UDFToDouble(_col1.key) > 200.0)}
+                1 {(UDFToDouble(_col1.key) > 200.0D)}
              keys:
                0 _col0 (type: string)
                1 _col0 (type: string)
diff --git a/ql/src/test/results/clientpositive/mapjoin47.q.out b/ql/src/test/results/clientpositive/mapjoin47.q.out
index 2904b68813..af7f20f819 100644
--- a/ql/src/test/results/clientpositive/mapjoin47.q.out
+++ b/ql/src/test/results/clientpositive/mapjoin47.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
            alias: src1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(value) BETWEEN 100.0 AND 102.0 and key is not null) (type: boolean)
+              predicate: (UDFToDouble(value) BETWEEN 100.0D AND 102.0D and key is not null) (type: boolean)
              Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -50,7 +50,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(value) BETWEEN 100.0 AND 102.0 and key is not null) (type: boolean)
+              predicate: (UDFToDouble(value) BETWEEN 100.0D AND 102.0D and key is not null) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -245,7 +245,7 @@ STAGE PLANS:
            alias: src1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: UDFToDouble(value) BETWEEN 100.0 AND 102.0 (type: boolean)
+              predicate: UDFToDouble(value) BETWEEN 100.0D AND 102.0D (type: boolean)
              Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -263,7 +263,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: UDFToDouble(value) BETWEEN 100.0 AND 102.0 (type: boolean)
+              predicate: UDFToDouble(value) BETWEEN 100.0D AND 102.0D (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -377,7 +377,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3
-              residual filter predicates: {((_col0 = _col2) or UDFToDouble(_col1) BETWEEN 100.0 AND 102.0 or UDFToDouble(_col3) BETWEEN 100.0 AND 102.0)}
+              residual filter predicates: {((_col0 = _col2) or UDFToDouble(_col1) BETWEEN 100.0D AND 102.0D or UDFToDouble(_col3) BETWEEN 100.0D AND 102.0D)}
              Statistics: Num rows: 9026 Data size: 173876 Basic stats: COMPLETE Column stats: NONE
              Limit
                Number of rows: 10
@@ -487,7 +487,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3
-              residual filter predicates: {((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0)} {((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0)}
+              residual filter predicates: {((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0D)} {((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0D)}
              Statistics: Num rows: 1388 Data size: 26738 Basic stats: COMPLETE Column stats: NONE
              Limit
                Number of rows: 10
@@ -591,7 +591,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3
-              residual filter predicates: {(((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0) or ((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0))}
+              residual filter predicates: {(((UDFToDouble(_col0) + UDFToDouble(_col2)) >= 100.0D) or ((UDFToDouble(_col0) + UDFToDouble(_col2)) <= 102.0D))}
              Statistics: Num rows: 8332 Data size: 160507 Basic stats: COMPLETE Column stats: NONE
              Limit
                Number of rows: 10
@@ -816,7 +816,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) >= 100.0)}
+              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) >= 100.0D)}
              Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col2 (type: string), _col3 (type: string)
@@ -951,7 +951,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0)}
+              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D)}
              Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE
              Limit
                Number of rows: 10
@@ -1063,7 +1063,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3
-              residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0)}
+              residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0D)}
              Statistics: Num rows: 4166 Data size: 80253 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
@@ -1281,7 +1281,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0)}
+              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D)}
              Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE
              Limit
                Number of rows: 10
@@ -1392,7 +1392,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3
-              residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0)}
+              residual filter predicates: {((UDFToDouble(_col2) + UDFToDouble(_col0)) >= 100.0D)}
              Statistics: Num rows: 4166 Data size: 80253 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: _col0 (type: string)
@@ -1555,7 +1555,7 @@ STAGE PLANS:
                0 
                1 
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0)}
+              residual filter predicates: {((UDFToDouble(_col4) + UDFToDouble(_col0)) <= 102.0D)}
              Statistics: Num rows: 4583 Data size: 88285 Basic stats: COMPLETE Column stats: NONE
              Limit
                Number of rows: 10
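The mapjoin47.q.out hunks extend the same rewrite to residual join filters and to BETWEEN bounds. A sketch of the BETWEEN pattern (src and src1 are the table names visible in the hunks; the exact query is an assumption):

    EXPLAIN
    SELECT * FROM src1 JOIN src ON (src1.key = src.key)
    WHERE src1.value BETWEEN 100 AND 102;
    -- 'value' is a string column, so the plan compares doubles:
    --   UDFToDouble(value) BETWEEN 100.0D AND 102.0D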
diff --git a/ql/src/test/results/clientpositive/masking_1.q.out b/ql/src/test/results/clientpositive/masking_1.q.out
index 8bc8bb69b3..28f8005c91 100644
--- a/ql/src/test/results/clientpositive/masking_1.q.out
+++ b/ql/src/test/results/clientpositive/masking_1.q.out
@@ -444,7 +444,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (((UDFToDouble(key) % 2.0) = 0.0) and (UDFToDouble(key) < 10.0) and (UDFToDouble(key) > 0.0)) (type: boolean)
+              predicate: (((UDFToDouble(key) % 2.0D) = 0.0D) and (UDFToDouble(key) < 10.0D) and (UDFToDouble(key) > 0.0D)) (type: boolean)
              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), upper(value) (type: string)
diff --git a/ql/src/test/results/clientpositive/masking_12.q.out b/ql/src/test/results/clientpositive/masking_12.q.out
index c7a7b1bca5..87481b20d1 100644
--- a/ql/src/test/results/clientpositive/masking_12.q.out
+++ b/ql/src/test/results/clientpositive/masking_12.q.out
@@ -321,7 +321,7 @@ STAGE PLANS:
              predicate: (((key % 2) = 0) and (key < 10) and (key > 6)) (type: boolean)
              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
              Select Operator
-                expressions: UDFToInteger((UDFToDouble(key) / 2.0)) (type: int)
+                expressions: UDFToInteger((UDFToDouble(key) / 2.0D)) (type: int)
                outputColumnNames: _col0
                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
@@ -389,10 +389,10 @@ STAGE PLANS:
              insideView TRUE
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (((UDFToInteger((UDFToDouble(key) / 2.0)) % 2) = 0) and ((key % 2) = 0) and (UDFToInteger((UDFToDouble(key) / 2.0)) < 10) and (key < 10) and (key > 6)) (type: boolean)
+              predicate: (((UDFToInteger((UDFToDouble(key) / 2.0D)) % 2) = 0) and ((key % 2) = 0) and (UDFToInteger((UDFToDouble(key) / 2.0D)) < 10) and (key < 10) and (key > 6)) (type: boolean)
              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
              Select Operator
-                expressions: UDFToInteger((UDFToDouble(key) / 2.0)) (type: int)
+                expressions: UDFToInteger((UDFToDouble(key) / 2.0D)) (type: int)
                outputColumnNames: _col0
                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
diff --git a/ql/src/test/results/clientpositive/masking_3.q.out b/ql/src/test/results/clientpositive/masking_3.q.out
index 3a3547dacc..504085af92 100644
--- a/ql/src/test/results/clientpositive/masking_3.q.out
+++ b/ql/src/test/results/clientpositive/masking_3.q.out
@@ -8215,7 +8215,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (((UDFToDouble(key) % 2.0) = 0.0) and (UDFToDouble(key) < 10.0) and (UDFToDouble(key) > 0.0)) (type: boolean)
+              predicate: (((UDFToDouble(key) % 2.0D) = 0.0D) and (UDFToDouble(key) < 10.0D) and (UDFToDouble(key) > 0.0D)) (type: boolean)
              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), upper(value) (type: string)
diff --git a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
index 810e2ba4c0..bc42df0227 100644
--- a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb.q.out
@@ -684,7 +684,7 @@ STAGE PLANS:
            alias: default.ssb_mv
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
-              predicate: ((d_year = 1993) and (lo_quantity < 25.0) and lo_discount BETWEEN 1.0 AND 3.0) (type: boolean)
+              predicate: ((d_year = 1993) and (lo_quantity < 25.0D) and lo_discount BETWEEN 1.0D AND 3.0D) (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: discounted_price (type: double)
@@ -753,7 +753,7 @@ STAGE PLANS:
            alias: default.ssb_mv
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
-              predicate: ((d_yearmonthnum = 199401) and lo_discount BETWEEN 4.0 AND 6.0 and lo_quantity BETWEEN 26.0 AND 35.0) (type: boolean)
+              predicate: ((d_yearmonthnum = 199401) and lo_discount BETWEEN 4.0D AND 6.0D and lo_quantity BETWEEN 26.0D AND 35.0D) (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: discounted_price (type: double)
@@ -824,7 +824,7 @@ STAGE PLANS:
            alias: default.ssb_mv
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
-              predicate: ((d_weeknuminyear = 6) and (d_year = 1994) and lo_discount BETWEEN 5.0 AND 7.0 and lo_quantity BETWEEN 26.0 AND 35.0) (type: boolean)
+              predicate: ((d_weeknuminyear = 6) and (d_year = 1994) and lo_discount BETWEEN 5.0D AND 7.0D and lo_quantity BETWEEN 26.0D AND 35.0D) (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: discounted_price (type: double)
diff --git a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
index 154dd64abb..d561208fd8 100644
--- a/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_rewrite_ssb_2.q.out
@@ -686,7 +686,7 @@ STAGE PLANS:
            alias: default.ssb_mv
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
-              predicate: ((UDFToDouble(lo_quantity) < 25.0) and (UDFToInteger(d_year) = 1993) and UDFToDouble(lo_discount) BETWEEN 1.0 AND 3.0) (type: boolean)
+              predicate: ((UDFToDouble(lo_quantity) < 25.0D) and (UDFToInteger(d_year) = 1993) and UDFToDouble(lo_discount) BETWEEN 1.0D AND 3.0D) (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: discounted_price (type: double)
@@ -755,7 +755,7 @@ STAGE PLANS:
            alias: default.ssb_mv
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
-              predicate: ((UDFToInteger(d_yearmonthnum) = 199401) and UDFToDouble(lo_discount) BETWEEN 4.0 AND 6.0 and UDFToDouble(lo_quantity) BETWEEN 26.0 AND 35.0) (type: boolean)
+              predicate: ((UDFToInteger(d_yearmonthnum) = 199401) and UDFToDouble(lo_discount) BETWEEN 4.0D AND 6.0D and UDFToDouble(lo_quantity) BETWEEN 26.0D AND 35.0D) (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: discounted_price (type: double)
@@ -826,7 +826,7 @@ STAGE PLANS:
            alias: default.ssb_mv
            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
            Filter Operator
-              predicate: ((UDFToInteger(d_weeknuminyear) = 6) and (UDFToInteger(d_year) = 1994) and UDFToDouble(lo_discount) BETWEEN 5.0 AND 7.0 and UDFToDouble(lo_quantity) BETWEEN 26.0 AND 35.0) (type: boolean)
+              predicate: ((UDFToInteger(d_weeknuminyear) = 6) and (UDFToInteger(d_year) = 1994) and UDFToDouble(lo_discount) BETWEEN 5.0D AND 7.0D and UDFToDouble(lo_quantity) BETWEEN 26.0D AND 35.0D) (type: boolean)
              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: discounted_price (type: double)
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
index 8b0b0e7672..6288584c51 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out
@@ -149,7 +149,7 @@ STAGE PLANS:
            alias: srcpart_merge_dp_rc
            Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 2.0) = 0.0), 'a1', 'b1') (type: string)
+              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 2.0D) = 0.0D), 'a1', 'b1') (type: string)
              outputColumnNames: _col0, _col1, _col2
              Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
index f9ac40db58..bd151daa24 100644
--- a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
+++ b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out
@@ -125,7 +125,7 @@ STAGE PLANS:
            alias: srcpart_merge_dp_rc
            Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
+              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0D) = 0.0D), 'a1', 'b1') (type: string)
              outputColumnNames: _col0, _col1, _col2
              Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
diff --git a/ql/src/test/results/clientpositive/mergejoins.q.out b/ql/src/test/results/clientpositive/mergejoins.q.out
index 1d9c871c3d..1e05fe00c9 100644
--- a/ql/src/test/results/clientpositive/mergejoins.q.out
+++ b/ql/src/test/results/clientpositive/mergejoins.q.out
@@ -251,7 +251,7 @@ STAGE PLANS:
               Left Outer Join 1 to 2
             filter predicates:
               0 
-              1 {(UDFToDouble(KEY.reducesinkkey0) < 10.0)}
+              1 {(UDFToDouble(KEY.reducesinkkey0) < 10.0D)}
               2 
             keys:
               0 _col0 (type: string)
diff --git a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
index 55ed49d194..7d28762fa2 100644
--- a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
            alias: src1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
              Statistics: Num rows: 8 Data size: 61 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
@@ -93,7 +93,7 @@ STAGE PLANS:
            alias: src2
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/noalias_subq1.q.out b/ql/src/test/results/clientpositive/noalias_subq1.q.out
index dbe552ca1f..413147becb 100644
--- a/ql/src/test/results/clientpositive/noalias_subq1.q.out
+++ b/ql/src/test/results/clientpositive/noalias_subq1.q.out
@@ -16,7 +16,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: value (type: string)
diff --git a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
index 0cd623de9a..31ef00a1cb 100644
--- a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
@@ -14,7 +14,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              expressions: (UDFToDouble(key) + 1.0) (type: double), ((UDFToDouble(key) + 1.0) + 1.0) (type: double)
+              expressions: (UDFToDouble(key) + 1.0D) (type: double), ((UDFToDouble(key) + 1.0D) + 1.0D) (type: double)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out
index dffa80d05a..4c7716c2c1 100644
--- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out
+++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out
@@ -142,7 +142,7 @@ STAGE PLANS:
            alias: srcpart
            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
@@ -377,7 +377,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string)
@@ -466,7 +466,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
@@ -511,7 +511,7 @@ STAGE PLANS:
            alias: srcpart
            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string), ds (type: string), hr (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
@@ -769,7 +769,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: ((UDFToDouble(key) < 250.0) and (UDFToDouble(key) > 200.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 250.0D) and (UDFToDouble(key) > 200.0D)) (type: boolean)
              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: value (type: string), key (type: string)
@@ -907,7 +907,7 @@ STAGE PLANS:
            alias: srcpart
            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (rand() > 1.0) (type: boolean)
+              predicate: (rand() > 1.0D) (type: boolean)
              Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
diff --git a/ql/src/test/results/clientpositive/notable_alias1.q.out b/ql/src/test/results/clientpositive/notable_alias1.q.out
index 03f2649fc4..3d36ae5915 100644
--- a/ql/src/test/results/clientpositive/notable_alias1.q.out
+++ b/ql/src/test/results/clientpositive/notable_alias1.q.out
@@ -28,7 +28,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
diff --git a/ql/src/test/results/clientpositive/notable_alias2.q.out b/ql/src/test/results/clientpositive/notable_alias2.q.out
index 7186513b89..7fd139f76f 100644
--- a/ql/src/test/results/clientpositive/notable_alias2.q.out
+++ b/ql/src/test/results/clientpositive/notable_alias2.q.out
@@ -28,7 +28,7 @@ STAGE PLANS:
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+              predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
diff --git a/ql/src/test/results/clientpositive/nullgroup.q.out b/ql/src/test/results/clientpositive/nullgroup.q.out
index 53c74b4f8e..cccc3c4de8 100644
--- a/ql/src/test/results/clientpositive/nullgroup.q.out
+++ b/ql/src/test/results/clientpositive/nullgroup.q.out
@@ -16,7 +16,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
@@ -76,7 +76,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
@@ -137,7 +137,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
@@ -213,7 +213,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/nullgroup2.q.out b/ql/src/test/results/clientpositive/nullgroup2.q.out
index 3886a98bb8..c2c1c0667d 100644
--- a/ql/src/test/results/clientpositive/nullgroup2.q.out
+++ b/ql/src/test/results/clientpositive/nullgroup2.q.out
@@ -17,7 +17,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
@@ -102,7 +102,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
@@ -164,7 +164,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: key (type: string)
@@ -242,7 +242,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) > 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) > 9999.0D) (type: boolean)
              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/nullgroup4.q.out b/ql/src/test/results/clientpositive/nullgroup4.q.out
index b687c559f5..f4b66a9fcd 100644
--- a/ql/src/test/results/clientpositive/nullgroup4.q.out
+++ b/ql/src/test/results/clientpositive/nullgroup4.q.out
@@ -17,7 +17,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) = 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) = 9999.0D) (type: boolean)
              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: value (type: string)
@@ -104,7 +104,7 @@ STAGE PLANS:
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: (UDFToDouble(key) = 9999.0) (type: boolean)
+              predicate: (UDFToDouble(key) = 9999.0D) (type: boolean)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -197,7 +197,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -276,7 +276,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) diff --git a/ql/src/test/results/clientpositive/nullgroup4_multi_distinct.q.out b/ql/src/test/results/clientpositive/nullgroup4_multi_distinct.q.out index 707657987a..3559ceff17 100644 --- a/ql/src/test/results/clientpositive/nullgroup4_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/nullgroup4_multi_distinct.q.out @@ -16,7 +16,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), substr(value, 5) (type: string) @@ -80,7 +80,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), substr(value, 5) (type: string) diff --git a/ql/src/test/results/clientpositive/num_op_type_conv.q.out b/ql/src/test/results/clientpositive/num_op_type_conv.q.out index 8e714f4ad7..4dd23e9f4d 100644 --- a/ql/src/test/results/clientpositive/num_op_type_conv.q.out +++ b/ql/src/test/results/clientpositive/num_op_type_conv.q.out @@ -20,7 +20,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: null (type: int), null (type: double), null (type: void), 1 (type: bigint), 0 (type: bigint), 0.0 (type: double) + expressions: null (type: int), null (type: double), null (type: void), 1L (type: bigint), 0L (type: bigint), 0.0D (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 500 Data size: 12016 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/orc_merge5.q.out b/ql/src/test/results/clientpositive/orc_merge5.q.out index 970c4ddad9..91e89ccbf4 100644 --- a/ql/src/test/results/clientpositive/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/orc_merge5.q.out @@ -37,10 +37,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) 
(type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) @@ -153,10 +153,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/orc_merge6.q.out b/ql/src/test/results/clientpositive/orc_merge6.q.out index a08eb09cee..68034dac4c 100644 --- a/ql/src/test/results/clientpositive/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/orc_merge6.q.out @@ -37,10 +37,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) @@ -206,10 +206,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out index e8d504a9cb..d9cb7877d3 100644 --- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out @@ -39,7 +39,7 @@ STAGE PLANS: alias: orc_merge5 Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/order_by_expr_1.q.out b/ql/src/test/results/clientpositive/order_by_expr_1.q.out index 0a29419c54..af53b77515 100644 --- a/ql/src/test/results/clientpositive/order_by_expr_1.q.out +++ b/ql/src/test/results/clientpositive/order_by_expr_1.q.out @@ -59,7 +59,7 @@ STAGE PLANS: alias: t Statistics: Num rows: 9 Data size: 37 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 5-5 
(type: interval_year_month), -1-1 (type: interval_year_month) + expressions: INTERVAL'5-5' (type: interval_year_month), INTERVAL'-1-1' (type: interval_year_month) outputColumnNames: _col0, _col1 Statistics: Num rows: 9 Data size: 144 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.out index 0faacd2fdf..e087ac5f02 100644 --- a/ql/src/test/results/clientpositive/outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out @@ -30,7 +30,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -51,7 +51,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -324,7 +324,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -345,7 +345,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_0.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_0.q.out index a790fc18c2..a5bf8eb57c 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_0.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_0.q.out @@ -1586,7 +1586,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0 + _col0) (type: double), _col1 (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) + (-6432.0 + _col0)) (type: double), _col2 (type: double), (- (-6432.0 + _col0)) (type: double), (-6432.0 + (- (-6432.0 + _col0))) (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) / (- (-6432.0 + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0 + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) + expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0D + _col0) (type: double), _col1 (type: double), (- (-6432.0D + _col0)) (type: double), ((- 
(-6432.0D + _col0)) + (-6432.0D + _col0)) (type: double), _col2 (type: double), (- (-6432.0D + _col0)) (type: double), (-6432.0D + (- (-6432.0D + _col0))) (type: double), (- (-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) / (- (-6432.0D + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0D + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_1.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_1.q.out index 637c5be0f9..7aa2a3995a 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_1.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_1.q.out @@ -121,7 +121,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 / -26.28) (type: double), _col1 (type: double), (-1.389 + _col1) (type: double), (_col1 * (-1.389 + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389 + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175 % (- (_col1 * (-1.389 + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) + expressions: _col0 (type: double), (_col0 / -26.28D) (type: double), _col1 (type: double), (-1.389D + _col1) (type: double), (_col1 * (-1.389D + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389D + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175D % (- (_col1 * (-1.389D + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_10.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_10.q.out index 6e87257715..43c4e268fc 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_10.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_10.q.out @@ -69,10 +69,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringGroupColLessEqualStringScalar(col 7:string, val 10), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterDecimalScalarGreaterEqualDecimalColumn(val -5638.15, col 14:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(6,2))), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 6981.0), FilterExprOrExpr(children: FilterDecimalColEqualDecimalScalar(col 15:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(11,4)), FilterStringColLikeStringScalar(col 6:string, pattern %a)))) - predicate: (((UDFToDouble(ctinyint) > cdouble) and 
(-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) + predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0D) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) Statistics: Num rows: 9557 Data size: 114684 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639 - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) + expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0D) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639D - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_11.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_11.q.out index fb3707b305..4e72a64c22 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_11.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_11.q.out @@ -54,7 +54,7 @@ STAGE PLANS: predicate: ((cstring2 = cstring1) or (ctimestamp1 is null and (cstring1 like '%a'))) (type: boolean) Statistics: Num rows: 9216 Data size: 110592 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0) (type: double), (cdouble * -5638.15) (type: double) + expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639D) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0D) (type: double), (cdouble * -5638.15D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_12.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_12.q.out index a9215d5f3a..f507aeaefc 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_12.q.out +++ 
b/ql/src/test/results/clientpositive/parquet_vectorization_12.q.out @@ -149,7 +149,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1877 Data size: 22524 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0 * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0 * _col0) / -6432.0) (type: double), (- ((-6432.0 * _col0) / -6432.0)) (type: double), _col6 (type: double), (- (-6432.0 * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0 * _col0)) (type: double), (- (- ((-6432.0 * _col0) / -6432.0))) (type: double), (((-6432.0 * _col0) / -6432.0) + (- (-6432.0 * _col0))) (type: double), _col8 (type: double) + expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0D * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0D * _col0) / -6432.0D) (type: double), (- ((-6432.0D * _col0) / -6432.0D)) (type: double), _col6 (type: double), (- (-6432.0D * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0D * _col0)) (type: double), (- (- ((-6432.0D * _col0) / -6432.0D))) (type: double), (((-6432.0D * _col0) / -6432.0D) + (- (-6432.0D * _col0))) (type: double), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19 Statistics: Num rows: 1877 Data size: 22524 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_13.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_13.q.out index 55b6afc0d6..fa77db10c5 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_13.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_13.q.out @@ -88,7 +88,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 11.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 12.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > 11.0D) and (UDFToDouble(ctimestamp2) <> 12.0D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 2730 Data size: 32760 Basic stats: COMPLETE Column stats: NONE Select Operator 
expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -151,7 +151,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -417,7 +417,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -1.388)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val -1.3359999999999999)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > -1.388D) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 2730 Data size: 32760 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: 
boolean) @@ -474,7 +474,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_14.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_14.q.out index f127434557..2d9ecd2b0e 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_14.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_14.q.out @@ -88,10 +88,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColLessEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterTimestampColLessTimestampColumn(col 9:timestamp, col 8:timestamp)), FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -257), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float))) - predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) + predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257L) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) Statistics: Num rows: 606 Data size: 7272 Basic stats: COMPLETE Column stats: NONE Select Operator 
- expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double) + expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Select Vectorization: className: VectorSelectOperator @@ -152,7 +152,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 303 Data size: 3636 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28 + _col2) (type: double), (- (-26.28 + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28 + _col2)) / 10.175) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28 + _col2)) / 10.175)) (type: double), (-1.389 % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) + expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175D) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Statistics: Num rows: 303 Data size: 3636 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_15.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_15.q.out index 6ada2888ec..2cebc9f945 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_15.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_15.q.out @@ -84,7 +84,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %ss%), FilterStringColLikeStringScalar(col 6:string, pattern 10%), FilterExprAndExpr(children: FilterLongColGreaterEqualLongScalar(col 2:int, val -75), FilterLongColEqualLongColumn(col 0:smallint, col 1:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -3728.0))) - predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) + predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0D)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE 
Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -146,7 +146,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 6144 Data size: 73728 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) + expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 6144 Data size: 73728 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_16.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_16.q.out index 1174aa75b9..4175c782c1 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_16.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_16.q.out @@ -61,7 +61,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 49152 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -124,7 +124,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) 
(type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_17.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_17.q.out index 8f26d41a48..a325810ad7 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_17.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_17.q.out @@ -69,10 +69,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -23), FilterExprOrExpr(children: FilterDoubleColNotEqualDoubleScalar(col 5:double, val 988888.0), FilterDecimalColGreaterDecimalScalar(col 13:decimal(13,3), val -863.257)(children: CastLongToDecimal(col 2:int) -> 13:decimal(13,3))), FilterExprOrExpr(children: FilterLongColGreaterEqualLongScalar(col 0:tinyint, val 33), FilterLongColGreaterEqualLongColumn(col 1:bigint, col 3:bigint)(children: col 1:smallint), FilterDoubleColEqualDoubleColumn(col 4:double, col 5:double)(children: col 4:float))) - predicate: (((cdouble <> 988888.0) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23)) (type: boolean) + predicate: (((cdouble <> 988888.0D) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33Y) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23L)) (type: boolean) Statistics: Num rows: 4096 Data size: 49152 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58 + (- (- cdouble))) (type: double) + expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58D + (- (- cdouble))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, 
_col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_2.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_2.q.out index ebef240d4a..fbaa7fa8b7 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_2.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_2.q.out @@ -67,7 +67,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessTimestampColumn(col 8:timestamp, col 9:timestamp), FilterStringColLikeStringScalar(col 7:string, pattern b%), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -5638.14990234375)), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterDoubleScalarNotEqualDoubleColumn(val -10669.0, col 13:double)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterLongScalarGreaterLongColumn(val 359, col 2:int)))) - predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0 <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) + predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0D <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) Statistics: Num rows: 4778 Data size: 57336 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cbigint (type: bigint), cfloat (type: float), cdouble (type: double) @@ -125,7 +125,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 % -563.0) (type: double), (_col0 + 762.0) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) + expressions: _col0 (type: double), (_col0 % -563.0D) (type: double), (_col0 + 762.0D) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0D) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_3.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_3.q.out index 2abe821987..d15b230606 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_3.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_3.q.out @@ -72,7 +72,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: 
FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 14:decimal(22,3))(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterDoubleColEqualDoubleScalar(col 13:double, val -29071.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterDecimalScalarLessEqualDecimalColumn(val 79.553, col 15:decimal(8,3))(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(8,3)), FilterTimestampColGreaterTimestampColumn(col 8:timestamp, col 9:timestamp))) - predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0))) (type: boolean) + predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0D))) (type: boolean) Statistics: Num rows: 2503 Data size: 30036 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float) @@ -130,7 +130,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 - 10.175) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175)) (type: double), (- _col1) (type: double), (_col0 % 79.553) (type: double), (- (_col0 * (_col0 - 10.175))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175))) / (_col0 - 10.175)) (type: double), (- (_col0 - 10.175)) (type: double), _col4 (type: double), (-3728.0 - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) + expressions: _col0 (type: double), (_col0 - 10.175D) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175D)) (type: double), (- _col1) (type: double), (_col0 % 79.553D) (type: double), (- (_col0 * (_col0 - 10.175D))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175D))) / (_col0 - 10.175D)) (type: double), (- (_col0 - 10.175D)) (type: double), _col4 (type: double), (-3728.0D - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_4.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_4.q.out index 4c39858751..1cc021d507 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_4.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_4.q.out @@ -67,7 +67,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterEqualLongColumn(col 1:int, col 2:int)(children: col 1:smallint), 
FilterExprAndExpr(children: FilterLongScalarGreaterEqualLongColumn(val -89010, col 0:int)(children: col 0:tinyint), FilterDoubleColGreaterDoubleScalar(col 5:double, val 79.553)), FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -563, col 3:bigint), FilterExprOrExpr(children: FilterLongColNotEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterDoubleScalarGreaterEqualDoubleColumn(val -3728.0, col 5:double)))) - predicate: (((-563 <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0 >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) + predicate: (((-563L <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0D >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553D)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cdouble (type: double) @@ -125,7 +125,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: bigint), (_col0 * -563) (type: bigint), (-3728 + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2)) (type: double), ((-3728 + _col0) - (_col0 * -563)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2))) (type: double) + expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2)) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_5.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_5.q.out index b445dfbe77..bebf00a115 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_5.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_5.q.out @@ -119,7 +119,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0 % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: 
tinyint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0D % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_6.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_6.q.out index 3d7d5e3c85..cc552ae26f 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_6.q.out @@ -63,10 +63,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 10:boolean, val 0), FilterLongColGreaterEqualLongColumn(col 11:boolean, col 10:boolean)), FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:bigint), FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %a), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -257.0))))) - predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0)) (type: boolean) + predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 11605 Data size: 139260 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28 / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) + expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28D / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_7.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_7.q.out index 77caa03107..b4aa7143e3 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_7.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_7.q.out @@ -75,7 +75,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: 
FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -15.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 65532 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) @@ -295,7 +295,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 7.6850000000000005)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 65532 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), 
(cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_8.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_8.q.out index caa4bff1d6..ddec91f157 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_8.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_8.q.out @@ -71,10 +71,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 10.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 16.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0D) and (UDFToDouble(ctimestamp2) <> 16.0D))) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator @@ -278,10 +278,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 12.503)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 11.998)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) 
or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503D) and (UDFToDouble(ctimestamp2) <> 11.998D))) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_9.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_9.q.out index 1174aa75b9..4175c782c1 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_9.q.out @@ -61,7 +61,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 49152 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -124,7 +124,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * 
UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_div0.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_div0.q.out index 4a5cca6d22..e3aec1e738 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_div0.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_div0.q.out @@ -22,7 +22,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: (cdouble / 0.0) (type: double) + expressions: (cdouble / 0.0D) (type: double) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -201,7 +201,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val 0), FilterLongColLessLongScalar(col 3:bigint, val 100000000)) - predicate: ((cbigint < 100000000) and (cbigint > 0)) (type: boolean) + predicate: ((cbigint < 100000000L) and (cbigint > 0L)) (type: boolean) Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (cbigint - 988888) (type: bigint), (cdouble / UDFToDouble((cbigint - 988888))) (type: double), (1.2 / CAST( (cbigint - 988888) AS decimal(19,0))) (type: decimal(22,21)) @@ -399,10 +399,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -500.0), FilterDoubleColLessDoubleScalar(col 5:double, val -199.0)) - predicate: ((cdouble < -199.0) and (cdouble >= -500.0)) (type: boolean) + predicate: ((cdouble < -199.0D) and (cdouble >= -500.0D)) (type: boolean) Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 200.0) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0)) (type: double), ((cdouble + 200.0) / (cdouble + 200.0)) (type: double), (3.0 / (cdouble + 200.0)) (type: double), (1.2 / (cdouble + 200.0)) (type: double) + expressions: (cdouble + 200.0D) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0D)) (type: double), ((cdouble + 200.0D) / (cdouble + 200.0D)) (type: double), (3.0D / (cdouble + 200.0D)) (type: double), (1.2D / (cdouble + 200.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col4, _col5 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_limit.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_limit.q.out index f3e98e9d10..afafb4d66f 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_limit.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_limit.q.out @@ -213,7 +213,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct] Select Operator - expressions: ctinyint (type: tinyint), (cdouble + 1.0) (type: double) + expressions: 
ctinyint (type: tinyint), (cdouble + 1.0D) (type: double) outputColumnNames: _col0, _col1 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out b/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out index f1eb466021..3d9307bcd4 100644 --- a/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/parquet_vectorization_part_project.q.out @@ -66,7 +66,7 @@ STAGE PLANS: alias: alltypesparquet_part Statistics: Num rows: 200 Data size: 2400 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 2.0) (type: double) + expressions: (cdouble + 2.0D) (type: double) outputColumnNames: _col0 Statistics: Num rows: 200 Data size: 2400 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out index 03fcc62358..58bebce3fa 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out @@ -68,7 +68,7 @@ STAGE PLANS: alias: partition_test_partitioned Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(dt) <= 102.0) and (UDFToDouble(dt) >= 100.0)) (type: boolean) + predicate: ((UDFToDouble(dt) <= 102.0D) and (UDFToDouble(dt) >= 100.0D)) (type: boolean) Statistics: Num rows: 8 Data size: 58 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), dt (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint) @@ -181,7 +181,7 @@ STAGE PLANS: alias: partition_test_partitioned Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(dt) <= 102.0) and (UDFToDouble(dt) >= 100.0)) (type: boolean) + predicate: ((UDFToDouble(dt) <= 102.0D) and (UDFToDouble(dt) >= 100.0D)) (type: boolean) Statistics: Num rows: 8 Data size: 58 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), dt (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint) diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out index d5d900f5f4..f9a0c4b66a 100644 --- a/ql/src/test/results/clientpositive/pcr.q.out +++ b/ql/src/test/results/clientpositive/pcr.q.out @@ -4770,7 +4770,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) = 11.0) (type: boolean) + predicate: (UDFToDouble(key) = 11.0D) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), hr (type: string) @@ -4953,7 +4953,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) = 11.0) (type: boolean) + predicate: (UDFToDouble(key) = 11.0D) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string) diff --git a/ql/src/test/results/clientpositive/perf/spark/query12.q.out b/ql/src/test/results/clientpositive/perf/spark/query12.q.out index d1796248f2..411ad8875a 100644 --- 
a/ql/src/test/results/clientpositive/perf/spark/query12.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query12.q.out @@ -78,7 +78,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 2001-01-12 00:00:00.0 AND 2001-02-11 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-01-12 00:00:00.0' AND TIMESTAMP'2001-02-11 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query16.q.out b/ql/src/test/results/clientpositive/perf/spark/query16.q.out index 1b763e278e..743f27dd8d 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query16.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query16.q.out @@ -98,7 +98,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 2001-04-01 00:00:00.0 AND 2001-05-31 01:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-04-01 00:00:00.0' AND TIMESTAMP'2001-05-31 01:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query18.q.out b/ql/src/test/results/clientpositive/perf/spark/query18.q.out index 33b2f62386..0da17dadda 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query18.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query18.q.out @@ -306,7 +306,7 @@ STAGE PLANS: Statistics: Num rows: 421645953 Data size: 57099332415 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col4), avg(_col5), avg(_col6), avg(_col7), avg(_col8), avg(_col9), avg(_col10) - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 2108229765 Data size: 285496662075 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query20.q.out b/ql/src/test/results/clientpositive/perf/spark/query20.q.out index ab1b24dbf9..8aa57e146c 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query20.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query20.q.out @@ -70,7 +70,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 2001-01-12 00:00:00.0 AND 2001-02-11 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-01-12 00:00:00.0' AND TIMESTAMP'2001-02-11 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git 
a/ql/src/test/results/clientpositive/perf/spark/query21.q.out b/ql/src/test/results/clientpositive/perf/spark/query21.q.out index e73a627332..5c3d2fb478 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query21.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query21.q.out @@ -96,7 +96,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-08 23:00:00.0 AND 1998-05-08 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-08 23:00:00.0' AND TIMESTAMP'1998-05-08 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int), d_date (type: string) @@ -188,7 +188,7 @@ STAGE PLANS: 1 Map 7 Statistics: Num rows: 50024305 Data size: 790375939 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col10 (type: string), _col7 (type: string), CASE WHEN ((CAST( _col5 AS DATE) < 1998-04-08)) THEN (_col3) ELSE (0) END (type: int), CASE WHEN ((CAST( _col5 AS DATE) >= 1998-04-08)) THEN (_col3) ELSE (0) END (type: int) + expressions: _col10 (type: string), _col7 (type: string), CASE WHEN ((CAST( _col5 AS DATE) < DATE'1998-04-08')) THEN (_col3) ELSE (0) END (type: int), CASE WHEN ((CAST( _col5 AS DATE) >= DATE'1998-04-08')) THEN (_col3) ELSE (0) END (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 50024305 Data size: 790375939 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -212,7 +212,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 25012152 Data size: 395187961 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: CASE WHEN ((_col2 > 0)) THEN ((UDFToDouble(_col3) / UDFToDouble(_col2)) BETWEEN 0.666667 AND 1.5) ELSE (null) END (type: boolean) + predicate: CASE WHEN ((_col2 > 0L)) THEN ((UDFToDouble(_col3) / UDFToDouble(_col2)) BETWEEN 0.666667D AND 1.5D) ELSE (null) END (type: boolean) Statistics: Num rows: 12506076 Data size: 197593980 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/perf/spark/query22.q.out b/ql/src/test/results/clientpositive/perf/spark/query22.q.out index 38e4119afc..0353312009 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query22.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query22.q.out @@ -165,7 +165,7 @@ STAGE PLANS: Statistics: Num rows: 50024305 Data size: 790375939 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col3) - keys: _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), 0 (type: bigint) + keys: _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 250121525 Data size: 3951879695 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query23.q.out b/ql/src/test/results/clientpositive/perf/spark/query23.q.out index 8b5a83a58a..746e44df09 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query23.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query23.q.out @@ -824,7 +824,7 @@ STAGE PLANS: 
outputColumnNames: _col0, _col3 Statistics: Num rows: 348477374 Data size: 30742775095 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col3 > 4) (type: boolean) + predicate: (_col3 > 4L) (type: boolean) Statistics: Num rows: 116159124 Data size: 10247591639 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query27.q.out b/ql/src/test/results/clientpositive/perf/spark/query27.q.out index a87ec45c11..052f25ea68 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query27.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query27.q.out @@ -211,7 +211,7 @@ STAGE PLANS: Statistics: Num rows: 843315281 Data size: 74397518956 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col2), avg(_col3), avg(_col4), avg(_col5) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 2529945843 Data size: 223192556868 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query32.q.out b/ql/src/test/results/clientpositive/perf/spark/query32.q.out index e7d356d257..6f614e611f 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query32.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query32.q.out @@ -69,7 +69,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-18 00:00:00.0 AND 1998-06-16 01:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00.0' AND TIMESTAMP'1998-06-16 01:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -92,7 +92,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-18 00:00:00.0 AND 1998-06-16 01:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00.0' AND TIMESTAMP'1998-06-16 01:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query34.q.out b/ql/src/test/results/clientpositive/perf/spark/query34.q.out index 17d4280823..80f7ec1478 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query34.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query34.q.out @@ -74,7 +74,7 @@ STAGE PLANS: alias: household_demographics Statistics: Num rows: 7200 Data size: 770400 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.2)) ELSE (null) END and hd_demo_sk is not null) (type: boolean) + predicate: (((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN 
((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.2D)) ELSE (null) END and hd_demo_sk is not null) (type: boolean) Statistics: Num rows: 1200 Data size: 128400 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hd_demo_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query36.q.out b/ql/src/test/results/clientpositive/perf/spark/query36.q.out index 1a1a991b96..2b4770c686 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query36.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query36.q.out @@ -192,7 +192,7 @@ STAGE PLANS: Statistics: Num rows: 766650239 Data size: 67634106676 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 2299950717 Data size: 202902320028 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query37.q.out b/ql/src/test/results/clientpositive/perf/spark/query37.q.out index d8bff8173d..325861eb31 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query37.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query37.q.out @@ -46,7 +46,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 2001-06-02 00:00:00.0 AND 2001-08-01 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-06-02 00:00:00.0' AND TIMESTAMP'2001-08-01 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query38.q.out b/ql/src/test/results/clientpositive/perf/spark/query38.q.out index b9af9642b5..5d5b7fdf49 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query38.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query38.q.out @@ -398,7 +398,7 @@ STAGE PLANS: outputColumnNames: _col3 Statistics: Num rows: 152458212 Data size: 16545889939 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col3 = 3) (type: boolean) + predicate: (_col3 = 3L) (type: boolean) Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query39.q.out b/ql/src/test/results/clientpositive/perf/spark/query39.q.out index 77b1bdb866..51fcc84e25 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query39.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query39.q.out @@ -283,10 +283,10 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col3, _col4 Statistics: Num rows: 25012152 Data size: 395187961 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean) + predicate: CASE WHEN ((_col4 = 0.0D)) THEN (false) ELSE (((_col3 / _col4) > 1.0D)) END (type: boolean) Statistics: Num rows: 12506076 Data size: 197593980 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: 
int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double) + expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0D)) THEN (null) ELSE ((_col3 / _col4)) END (type: double) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12506076 Data size: 197593980 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -358,10 +358,10 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col3, _col4 Statistics: Num rows: 25012152 Data size: 395187961 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean) + predicate: CASE WHEN ((_col4 = 0.0D)) THEN (false) ELSE (((_col3 / _col4) > 1.0D)) END (type: boolean) Statistics: Num rows: 12506076 Data size: 197593980 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double) + expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0D)) THEN (null) ELSE ((_col3 / _col4)) END (type: double) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12506076 Data size: 197593980 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/perf/spark/query40.q.out b/ql/src/test/results/clientpositive/perf/spark/query40.q.out index ab196dcb5d..f286294d33 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query40.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query40.q.out @@ -92,7 +92,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-08 23:00:00.0 AND 1998-05-08 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-08 23:00:00.0' AND TIMESTAMP'1998-05-08 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int), d_date (type: string) @@ -219,7 +219,7 @@ STAGE PLANS: 1 Map 9 Statistics: Num rows: 421645953 Data size: 57099332415 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col14 (type: string), _col11 (type: string), CASE WHEN ((CAST( _col9 AS DATE) < 1998-04-08)) THEN ((_col4 - COALESCE(_col7,0))) ELSE (0) END (type: decimal(13,2)), CASE WHEN ((CAST( _col9 AS DATE) >= 1998-04-08)) THEN ((_col4 - COALESCE(_col7,0))) ELSE (0) END (type: decimal(13,2)) + expressions: _col14 (type: string), _col11 (type: string), CASE WHEN ((CAST( _col9 AS DATE) < DATE'1998-04-08')) THEN ((_col4 - COALESCE(_col7,0))) ELSE (0) END (type: decimal(13,2)), CASE WHEN ((CAST( _col9 AS DATE) >= DATE'1998-04-08')) THEN ((_col4 - COALESCE(_col7,0))) ELSE (0) END (type: decimal(13,2)) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 421645953 Data size: 57099332415 Basic stats: COMPLETE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/perf/spark/query45.q.out b/ql/src/test/results/clientpositive/perf/spark/query45.q.out index 07af4e2e4b..b674400ee2 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query45.q.out +++ 
b/ql/src/test/results/clientpositive/perf/spark/query45.q.out @@ -284,7 +284,7 @@ STAGE PLANS: outputColumnNames: _col3, _col7, _col8, _col13, _col14, _col15, _col17 Statistics: Num rows: 191667562 Data size: 29319594068 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((substr(_col8, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') or CASE WHEN ((_col14 = 0)) THEN (false) WHEN (_col17 is not null) THEN (true) WHEN (_col13 is null) THEN (null) WHEN ((_col15 < _col14)) THEN (null) ELSE (false) END) (type: boolean) + predicate: ((substr(_col8, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') or CASE WHEN ((_col14 = 0L)) THEN (false) WHEN (_col17 is not null) THEN (true) WHEN (_col13 is null) THEN (null) WHEN ((_col15 < _col14)) THEN (null) ELSE (false) END) (type: boolean) Statistics: Num rows: 191667562 Data size: 29319594068 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col3 (type: decimal(7,2)), _col7 (type: string), _col8 (type: string) diff --git a/ql/src/test/results/clientpositive/perf/spark/query5.q.out b/ql/src/test/results/clientpositive/perf/spark/query5.q.out index 1ba37e050f..90b45c1d92 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query5.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query5.q.out @@ -362,7 +362,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-08-18 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-08-18 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -469,7 +469,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-08-18 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-08-18 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -550,7 +550,7 @@ STAGE PLANS: Statistics: Num rows: 191657181 Data size: 25444391433 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2273797803 Data size: 251290313118 Basic stats: COMPLETE Column stats: NONE @@ -609,7 +609,7 @@ STAGE PLANS: Statistics: Num rows: 182955399 Data size: 24876643188 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2273797803 Data size: 251290313118 Basic stats: 
COMPLETE Column stats: NONE @@ -685,7 +685,7 @@ STAGE PLANS: Statistics: Num rows: 383320021 Data size: 33442403085 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2273797803 Data size: 251290313118 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query6.q.out b/ql/src/test/results/clientpositive/perf/spark/query6.q.out index 126cf9e11c..3e464f1328 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query6.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query6.q.out @@ -414,7 +414,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 383325119 Data size: 33817053293 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 >= 10) (type: boolean) + predicate: (_col1 >= 10L) (type: boolean) Statistics: Num rows: 127775039 Data size: 11272351038 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col1 (type: bigint) diff --git a/ql/src/test/results/clientpositive/perf/spark/query67.q.out b/ql/src/test/results/clientpositive/perf/spark/query67.q.out index b0fc41c7a2..26f6775dc7 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query67.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query67.q.out @@ -221,7 +221,7 @@ STAGE PLANS: Statistics: Num rows: 766650239 Data size: 67634106676 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col8) - keys: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 6899852151 Data size: 608706960084 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query70.q.out b/ql/src/test/results/clientpositive/perf/spark/query70.q.out index 7ebb776719..df07790c8f 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query70.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query70.q.out @@ -339,7 +339,7 @@ STAGE PLANS: Statistics: Num rows: 766650239 Data size: 67634106676 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2299950717 Data size: 202902320028 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query72.q.out b/ql/src/test/results/clientpositive/perf/spark/query72.q.out index ea43033733..d2048037f4 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query72.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query72.q.out @@ -358,7 +358,7 @@ STAGE PLANS: outputColumnNames: _col4, _col6, _col7, _col9, _col10, _col16, _col18, _col20 Statistics: Num rows: 510191624 Data size: 69090195216 Basic 
stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(_col20) > (UDFToDouble(_col9) + 5.0)) (type: boolean) + predicate: (UDFToDouble(_col20) > (UDFToDouble(_col9) + 5.0D)) (type: boolean) Statistics: Num rows: 170063874 Data size: 23030064981 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col18 (type: string), _col4 (type: int), _col6 (type: int), _col7 (type: int), _col10 (type: int), _col16 (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query73.q.out b/ql/src/test/results/clientpositive/perf/spark/query73.q.out index b25a16e061..7fec0e1a90 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query73.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query73.q.out @@ -86,7 +86,7 @@ STAGE PLANS: alias: household_demographics Statistics: Num rows: 7200 Data size: 770400 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.0)) ELSE (null) END and hd_demo_sk is not null) (type: boolean) + predicate: (((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.0D)) ELSE (null) END and hd_demo_sk is not null) (type: boolean) Statistics: Num rows: 1200 Data size: 128400 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hd_demo_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query77.q.out b/ql/src/test/results/clientpositive/perf/spark/query77.q.out index 9a77a7b8b4..638bbd9ba3 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query77.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query77.q.out @@ -279,7 +279,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -355,7 +355,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -456,7 +456,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) 
(type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -545,7 +545,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -562,7 +562,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -665,7 +665,7 @@ STAGE PLANS: Statistics: Num rows: 158394413 Data size: 57088528313 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1912659936 Data size: 311808612993 Basic stats: COMPLETE Column stats: NONE @@ -771,7 +771,7 @@ STAGE PLANS: Statistics: Num rows: 95833780 Data size: 13030622681 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1912659936 Data size: 311808612993 Basic stats: COMPLETE Column stats: NONE @@ -857,7 +857,7 @@ STAGE PLANS: Statistics: Num rows: 383325119 Data size: 33817053337 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: int), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: int), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1912659936 Data size: 311808612993 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query8.q.out b/ql/src/test/results/clientpositive/perf/spark/query8.q.out index 13cd0f4ab6..583abbc125 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query8.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query8.q.out @@ -319,7 +319,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 22000000 Data size: 22327357890 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 10) (type: boolean) + predicate: (_col1 > 10L) (type: boolean) Statistics: Num rows: 7333333 Data size: 7442452291 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: substr(_col0, 1, 5) (type: string) @@ 
-388,7 +388,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 6833333 Data size: 6935012229 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 = 2) (type: boolean) + predicate: (_col1 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 1014 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/perf/spark/query80.q.out b/ql/src/test/results/clientpositive/perf/spark/query80.q.out index ee769f9b95..9b008eb00d 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query80.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query80.q.out @@ -250,7 +250,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -296,7 +296,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -360,7 +360,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -646,7 +646,7 @@ STAGE PLANS: Statistics: Num rows: 231905279 Data size: 31404633508 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2435062716 Data size: 264270971781 Basic stats: COMPLETE Column stats: NONE @@ -775,7 +775,7 @@ STAGE PLANS: Statistics: Num rows: 115958879 Data size: 15767054151 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2435062716 Data size: 264270971781 Basic stats: COMPLETE Column stats: NONE @@ -848,7 +848,7 @@ STAGE PLANS: Statistics: Num rows: 463823414 Data size: 
40918636268 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2), sum(_col3), sum(_col4) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2435062716 Data size: 264270971781 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query82.q.out b/ql/src/test/results/clientpositive/perf/spark/query82.q.out index b45f8d0bfb..fb30019311 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query82.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query82.q.out @@ -46,7 +46,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 2002-05-30 00:00:00.0 AND 2002-07-29 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2002-05-30 00:00:00.0' AND TIMESTAMP'2002-07-29 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query83.q.out b/ql/src/test/results/clientpositive/perf/spark/query83.q.out index dc04bca302..614ae49ec8 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query83.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query83.q.out @@ -737,7 +737,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col5 Statistics: Num rows: 76653825 Data size: 5939181706 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col3 (type: bigint), (((UDFToDouble(_col3) / UDFToDouble(((_col3 + _col1) + _col5))) / 3.0) * 100.0) (type: double), _col1 (type: bigint), (((UDFToDouble(_col1) / UDFToDouble(((_col3 + _col1) + _col5))) / 3.0) * 100.0) (type: double), _col5 (type: bigint), (((UDFToDouble(_col5) / UDFToDouble(((_col3 + _col1) + _col5))) / 3.0) * 100.0) (type: double), (CAST( ((_col3 + _col1) + _col5) AS decimal(19,0)) / 3) (type: decimal(25,6)) + expressions: _col0 (type: string), _col3 (type: bigint), (((UDFToDouble(_col3) / UDFToDouble(((_col3 + _col1) + _col5))) / 3.0D) * 100.0D) (type: double), _col1 (type: bigint), (((UDFToDouble(_col1) / UDFToDouble(((_col3 + _col1) + _col5))) / 3.0D) * 100.0D) (type: double), _col5 (type: bigint), (((UDFToDouble(_col5) / UDFToDouble(((_col3 + _col1) + _col5))) / 3.0D) * 100.0D) (type: double), (CAST( ((_col3 + _col1) + _col5) AS decimal(19,0)) / 3) (type: decimal(25,6)) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 76653825 Data size: 5939181706 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/perf/spark/query86.q.out b/ql/src/test/results/clientpositive/perf/spark/query86.q.out index 80a24d3dd7..ddd97717a2 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query86.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query86.q.out @@ -150,7 +150,7 @@ STAGE PLANS: Statistics: Num rows: 174243235 Data size: 23692040863 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L 
(type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 522729705 Data size: 71076122589 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query87.q.out b/ql/src/test/results/clientpositive/perf/spark/query87.q.out index 5fe3e48dfc..7977a62f0a 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query87.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query87.q.out @@ -277,7 +277,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 87116929 Data size: 11797382219 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 1 (type: bigint), _col3 (type: bigint) + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 1L (type: bigint), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 87116929 Data size: 11797382219 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -334,7 +334,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 43560808 Data size: 5923010113 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 1 (type: bigint), _col3 (type: bigint) + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 1L (type: bigint), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 43560808 Data size: 5923010113 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -391,7 +391,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 174238687 Data size: 15371387547 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 2 (type: bigint), _col3 (type: bigint) + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 2L (type: bigint), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 174238687 Data size: 15371387547 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -419,7 +419,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 130677808 Data size: 13584384883 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col3 * 2) = _col4) and (_col3 > 0)) (type: boolean) + predicate: (((_col3 * 2) = _col4) and (_col3 > 0L)) (type: boolean) Statistics: Num rows: 21779634 Data size: 2264064077 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) @@ -432,7 +432,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 10889817 Data size: 1132032038 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 2 (type: bigint), _col3 (type: bigint) + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), 2L (type: bigint), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 10889817 Data size: 1132032038 Basic stats: COMPLETE Column stats: NONE Select Operator @@ -464,7 +464,7 @@ STAGE PLANS: outputColumnNames: _col3, _col4 Statistics: Num rows: 27225312 Data size: 3527521010 Basic stats: COMPLETE Column stats: NONE Filter Operator - 
predicate: (((_col3 * 2) = _col4) and (_col3 > 0)) (type: boolean) + predicate: (((_col3 * 2) = _col4) and (_col3 > 0L)) (type: boolean) Statistics: Num rows: 4537552 Data size: 587920168 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 4537552 Data size: 587920168 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/perf/spark/query9.q.out b/ql/src/test/results/clientpositive/perf/spark/query9.q.out index 4ec20070f7..49c6b7f5f8 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query9.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query9.q.out @@ -803,7 +803,7 @@ STAGE PLANS: 1 Reducer 31 Statistics: Num rows: 36 Data size: 112860 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CASE WHEN ((_col1 > 409437)) THEN (_col2) ELSE (_col3) END (type: decimal(11,6)), CASE WHEN ((_col4 > 4595804)) THEN (_col5) ELSE (_col6) END (type: decimal(11,6)), CASE WHEN ((_col7 > 7887297)) THEN (_col8) ELSE (_col9) END (type: decimal(11,6)), CASE WHEN ((_col10 > 10872978)) THEN (_col11) ELSE (_col12) END (type: decimal(11,6)), CASE WHEN ((_col13 > 43571537)) THEN (_col14) ELSE (_col15) END (type: decimal(11,6)) + expressions: CASE WHEN ((_col1 > 409437L)) THEN (_col2) ELSE (_col3) END (type: decimal(11,6)), CASE WHEN ((_col4 > 4595804L)) THEN (_col5) ELSE (_col6) END (type: decimal(11,6)), CASE WHEN ((_col7 > 7887297L)) THEN (_col8) ELSE (_col9) END (type: decimal(11,6)), CASE WHEN ((_col10 > 10872978L)) THEN (_col11) ELSE (_col12) END (type: decimal(11,6)), CASE WHEN ((_col13 > 43571537L)) THEN (_col14) ELSE (_col15) END (type: decimal(11,6)) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 36 Data size: 112860 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/perf/spark/query92.q.out b/ql/src/test/results/clientpositive/perf/spark/query92.q.out index 677a85cea1..1b73ab5878 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query92.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query92.q.out @@ -73,7 +73,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-18 00:00:00.0 AND 1998-06-16 01:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00.0' AND TIMESTAMP'1998-06-16 01:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) @@ -96,7 +96,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-18 00:00:00.0 AND 1998-06-16 01:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00.0' AND TIMESTAMP'1998-06-16 01:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query94.q.out b/ql/src/test/results/clientpositive/perf/spark/query94.q.out index 39c4476a5c..2af828032f 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query94.q.out +++ 
b/ql/src/test/results/clientpositive/perf/spark/query94.q.out @@ -94,7 +94,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1999-05-01 00:00:00.0 AND 1999-06-30 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1999-05-01 00:00:00.0' AND TIMESTAMP'1999-06-30 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query95.q.out b/ql/src/test/results/clientpositive/perf/spark/query95.q.out index 0d136b6da7..72bb8af3d9 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query95.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query95.q.out @@ -100,7 +100,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 1999-05-01 00:00:00.0 AND 1999-06-30 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1999-05-01 00:00:00.0' AND TIMESTAMP'1999-06-30 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/spark/query98.q.out b/ql/src/test/results/clientpositive/perf/spark/query98.q.out index 24d7de8b71..5264bc02dd 100644 --- a/ql/src/test/results/clientpositive/perf/spark/query98.q.out +++ b/ql/src/test/results/clientpositive/perf/spark/query98.q.out @@ -76,7 +76,7 @@ STAGE PLANS: alias: date_dim Statistics: Num rows: 73049 Data size: 81741831 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (CAST( d_date AS TIMESTAMP) BETWEEN 2001-01-12 00:00:00.0 AND 2001-02-11 00:00:00.0 and d_date_sk is not null) (type: boolean) + predicate: (CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-01-12 00:00:00.0' AND TIMESTAMP'2001-02-11 00:00:00.0' and d_date_sk is not null) (type: boolean) Statistics: Num rows: 8116 Data size: 9081804 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: d_date_sk (type: int) diff --git a/ql/src/test/results/clientpositive/perf/tez/query12.q.out b/ql/src/test/results/clientpositive/perf/tez/query12.q.out index 0506eca00d..fe18e03f2a 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query12.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query12.q.out @@ -132,7 +132,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_36] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 2001-01-12 00:00:00.0 AND 2001-02-11 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-01-12 00:00:00.0' AND TIMESTAMP'2001-02-11 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query14.q.out b/ql/src/test/results/clientpositive/perf/tez/query14.q.out index ef44cdbb6d..1cffcb010d 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query14.q.out +++ 
b/ql/src/test/results/clientpositive/perf/tez/query14.q.out @@ -297,7 +297,7 @@ Stage-0 Reduce Output Operator [RS_568] PartitionCols:_col0, _col1, _col2, _col3, _col4 Group By Operator [GBY_567] (rows=2032776160 width=405) - Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L Select Operator [SEL_375] (rows=116155905 width=432) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] Filter Operator [FIL_374] (rows=116155905 width=432) @@ -544,7 +544,7 @@ Stage-0 Select Operator [SEL_348] (rows=1 width=108) Output:["_col0","_col1","_col2"] Filter Operator [FIL_347] (rows=1 width=108) - predicate:(_col3 = 3) + predicate:(_col3 = 3L) Group By Operator [GBY_346] (rows=304916424 width=108) Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 71 [SIMPLE_EDGE] @@ -672,7 +672,7 @@ Stage-0 Reduce Output Operator [RS_568] PartitionCols:_col0, _col1, _col2, _col3, _col4 Group By Operator [GBY_567] (rows=2032776160 width=405) - Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L Select Operator [SEL_564] (rows=58081078 width=432) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] Filter Operator [FIL_563] (rows=58081078 width=432) @@ -868,7 +868,7 @@ Stage-0 Select Operator [SEL_537] (rows=1 width=108) Output:["_col0","_col1","_col2"] Filter Operator [FIL_536] (rows=1 width=108) - predicate:(_col3 = 3) + predicate:(_col3 = 3L) Group By Operator [GBY_535] (rows=304916424 width=108) Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 74 [SIMPLE_EDGE] @@ -909,7 +909,7 @@ Stage-0 Reduce Output Operator [RS_568] PartitionCols:_col0, _col1, _col2, _col3, _col4 Group By Operator [GBY_567] (rows=2032776160 width=405) - Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3, 0L Select Operator [SEL_187] (rows=232318249 width=385) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] Filter Operator [FIL_186] (rows=232318249 width=385) @@ -1105,7 +1105,7 @@ Stage-0 Select Operator [SEL_160] (rows=1 width=108) Output:["_col0","_col1","_col2"] Filter Operator [FIL_159] (rows=1 width=108) - predicate:(_col3 = 3) + predicate:(_col3 = 3L) Group By Operator [GBY_158] (rows=304916424 width=108) Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 61 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query16.q.out b/ql/src/test/results/clientpositive/perf/tez/query16.q.out index 1c0539199e..45c70fc630 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query16.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query16.q.out @@ -186,7 +186,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_68] (rows=8116 width=1119) - predicate:(CAST( d_date AS 
TIMESTAMP) BETWEEN 2001-04-01 00:00:00.0 AND 2001-05-31 01:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-04-01 00:00:00.0' AND TIMESTAMP'2001-05-31 01:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query18.q.out b/ql/src/test/results/clientpositive/perf/tez/query18.q.out index 1f0ec90b41..adb9682fc2 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query18.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query18.q.out @@ -96,7 +96,7 @@ Stage-0 SHUFFLE [RS_43] PartitionCols:_col0, _col1, _col2, _col3, _col4 Group By Operator [GBY_42] (rows=2108229765 width=135) - Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"],aggregations:["avg(_col4)","avg(_col5)","avg(_col6)","avg(_col7)","avg(_col8)","avg(_col9)","avg(_col10)"],keys:_col0, _col1, _col2, _col3, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"],aggregations:["avg(_col4)","avg(_col5)","avg(_col6)","avg(_col7)","avg(_col8)","avg(_col9)","avg(_col10)"],keys:_col0, _col1, _col2, _col3, 0L Select Operator [SEL_40] (rows=421645953 width=135) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"] Merge Join Operator [MERGEJOIN_83] (rows=421645953 width=135) diff --git a/ql/src/test/results/clientpositive/perf/tez/query20.q.out b/ql/src/test/results/clientpositive/perf/tez/query20.q.out index c07141b368..7aee675df9 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query20.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query20.q.out @@ -124,7 +124,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_36] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 2001-01-12 00:00:00.0 AND 2001-02-11 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-01-12 00:00:00.0' AND TIMESTAMP'2001-02-11 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query21.q.out b/ql/src/test/results/clientpositive/perf/tez/query21.q.out index aed0990277..87f4d74094 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query21.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query21.q.out @@ -78,7 +78,7 @@ Stage-0 <-Reducer 5 [SIMPLE_EDGE] SHUFFLE [RS_28] Filter Operator [FIL_26] (rows=12506076 width=15) - predicate:CASE WHEN ((_col2 > 0)) THEN ((UDFToDouble(_col3) / UDFToDouble(_col2)) BETWEEN 0.666667 AND 1.5) ELSE (null) END + predicate:CASE WHEN ((_col2 > 0L)) THEN ((UDFToDouble(_col3) / UDFToDouble(_col2)) BETWEEN 0.666667D AND 1.5D) ELSE (null) END Group By Operator [GBY_25] (rows=25012152 width=15) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1 <-Reducer 4 [SIMPLE_EDGE] @@ -133,7 +133,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0","_col1"] Filter Operator [FIL_42] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-08 23:00:00.0 AND 1998-05-08 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-08 23:00:00.0' 
AND TIMESTAMP'1998-05-08 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query22.q.out b/ql/src/test/results/clientpositive/perf/tez/query22.q.out index d13169c25c..efc87ede40 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query22.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query22.q.out @@ -69,7 +69,7 @@ Stage-0 SHUFFLE [RS_23] PartitionCols:_col0, _col1, _col2, _col3, _col4 Group By Operator [GBY_22] (rows=250121525 width=15) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["avg(_col3)"],keys:_col8, _col9, _col10, _col11, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["avg(_col3)"],keys:_col8, _col9, _col10, _col11, 0L Merge Join Operator [MERGEJOIN_45] (rows=50024305 width=15) Conds:RS_18._col1=RS_19._col0(Inner),Output:["_col3","_col8","_col9","_col10","_col11"] <-Map 9 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query23.q.out b/ql/src/test/results/clientpositive/perf/tez/query23.q.out index c7a5041861..14d3c40fb4 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query23.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query23.q.out @@ -293,7 +293,7 @@ Stage-0 Select Operator [SEL_27] (rows=116159124 width=88) Output:["_col1"] Filter Operator [FIL_26] (rows=116159124 width=88) - predicate:(_col3 > 4) + predicate:(_col3 > 4L) Select Operator [SEL_347] (rows=348477374 width=88) Output:["_col0","_col3"] Group By Operator [GBY_25] (rows=348477374 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query27.q.out b/ql/src/test/results/clientpositive/perf/tez/query27.q.out index c6a190553e..adb1bb7f1e 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query27.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query27.q.out @@ -72,7 +72,7 @@ Stage-0 SHUFFLE [RS_30] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_29] (rows=2529945843 width=88) - Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["avg(_col2)","avg(_col3)","avg(_col4)","avg(_col5)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["avg(_col2)","avg(_col3)","avg(_col4)","avg(_col5)"],keys:_col0, _col1, 0L Select Operator [SEL_27] (rows=843315281 width=88) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] Merge Join Operator [MERGEJOIN_59] (rows=843315281 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query32.q.out b/ql/src/test/results/clientpositive/perf/tez/query32.q.out index 3ad9595910..adb4377fb6 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query32.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query32.q.out @@ -101,7 +101,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_54] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-18 00:00:00.0 AND 1998-06-16 01:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00.0' AND TIMESTAMP'1998-06-16 01:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] <-Reducer 6 [ONE_TO_ONE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query34.q.out b/ql/src/test/results/clientpositive/perf/tez/query34.q.out index 
c85b4d0df4..54b8e5a117 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query34.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query34.q.out @@ -127,7 +127,7 @@ Stage-0 Select Operator [SEL_11] (rows=1200 width=107) Output:["_col0"] Filter Operator [FIL_55] (rows=1200 width=107) - predicate:(((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.2)) ELSE (null) END and hd_demo_sk is not null) + predicate:(((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.2D)) ELSE (null) END and hd_demo_sk is not null) TableScan [TS_9] (rows=7200 width=107) default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_buy_potential","hd_dep_count","hd_vehicle_count"] <-Reducer 5 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query36.q.out b/ql/src/test/results/clientpositive/perf/tez/query36.q.out index a30ac2e53c..41bcba67da 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query36.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query36.q.out @@ -95,7 +95,7 @@ Stage-0 SHUFFLE [RS_24] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_23] (rows=2299950717 width=88) - Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col0, _col1, 0L Select Operator [SEL_21] (rows=766650239 width=88) Output:["_col0","_col1","_col2","_col3"] Merge Join Operator [MERGEJOIN_52] (rows=766650239 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query37.q.out b/ql/src/test/results/clientpositive/perf/tez/query37.q.out index d5af964b0a..8284990add 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query37.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query37.q.out @@ -97,7 +97,7 @@ Stage-0 Select Operator [SEL_11] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_42] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 2001-06-02 00:00:00.0 AND 2001-08-01 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-06-02 00:00:00.0' AND TIMESTAMP'2001-08-01 00:00:00.0' and d_date_sk is not null) TableScan [TS_9] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query38.q.out b/ql/src/test/results/clientpositive/perf/tez/query38.q.out index 199bed7de5..e0aebc9495 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query38.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query38.q.out @@ -73,7 +73,7 @@ Stage-0 Output:["_col0"],aggregations:["count()"] Select Operator [SEL_82] (rows=1 width=108) Filter Operator [FIL_81] (rows=1 width=108) - predicate:(_col3 = 3) + predicate:(_col3 = 3L) Select Operator [SEL_114] (rows=152458212 width=108) Output:["_col3"] Group By Operator [GBY_80] (rows=152458212 width=108) diff --git a/ql/src/test/results/clientpositive/perf/tez/query39.q.out b/ql/src/test/results/clientpositive/perf/tez/query39.q.out index 793e375fb2..fdcd651351 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query39.q.out +++ 
b/ql/src/test/results/clientpositive/perf/tez/query39.q.out @@ -82,7 +82,7 @@ Stage-0 Select Operator [SEL_55] (rows=12506076 width=15) Output:["_col0","_col1","_col2","_col3"] Filter Operator [FIL_54] (rows=12506076 width=15) - predicate:CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END + predicate:CASE WHEN ((_col4 = 0.0D)) THEN (false) ELSE (((_col3 / _col4) > 1.0D)) END Select Operator [SEL_53] (rows=25012152 width=15) Output:["_col1","_col2","_col3","_col4"] Group By Operator [GBY_52] (rows=25012152 width=15) @@ -146,7 +146,7 @@ Stage-0 Select Operator [SEL_27] (rows=12506076 width=15) Output:["_col0","_col1","_col2","_col3"] Filter Operator [FIL_26] (rows=12506076 width=15) - predicate:CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END + predicate:CASE WHEN ((_col4 = 0.0D)) THEN (false) ELSE (((_col3 / _col4) > 1.0D)) END Select Operator [SEL_25] (rows=25012152 width=15) Output:["_col1","_col2","_col3","_col4"] Group By Operator [GBY_24] (rows=25012152 width=15) diff --git a/ql/src/test/results/clientpositive/perf/tez/query40.q.out b/ql/src/test/results/clientpositive/perf/tez/query40.q.out index 604f741279..b49668fe7a 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query40.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query40.q.out @@ -119,7 +119,7 @@ Stage-0 Select Operator [SEL_8] (rows=8116 width=1119) Output:["_col0","_col1"] Filter Operator [FIL_52] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-08 23:00:00.0 AND 1998-05-08 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-08 23:00:00.0' AND TIMESTAMP'1998-05-08 00:00:00.0' and d_date_sk is not null) TableScan [TS_6] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] <-Reducer 2 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query45.q.out b/ql/src/test/results/clientpositive/perf/tez/query45.q.out index 3efed2e0e1..eee388f1be 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query45.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query45.q.out @@ -73,7 +73,7 @@ Stage-0 Select Operator [SEL_51] (rows=191667562 width=152) Output:["_col3","_col7","_col8"] Filter Operator [FIL_50] (rows=191667562 width=152) - predicate:((substr(_col8, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') or CASE WHEN ((_col14 = 0)) THEN (false) WHEN (_col17 is not null) THEN (true) WHEN (_col13 is null) THEN (null) WHEN ((_col15 < _col14)) THEN (null) ELSE (false) END) + predicate:((substr(_col8, 1, 5)) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') or CASE WHEN ((_col14 = 0L)) THEN (false) WHEN (_col17 is not null) THEN (true) WHEN (_col13 is null) THEN (null) WHEN ((_col15 < _col14)) THEN (null) ELSE (false) END) Select Operator [SEL_49] (rows=191667562 width=152) Output:["_col3","_col7","_col8","_col13","_col14","_col15","_col17"] Merge Join Operator [MERGEJOIN_87] (rows=191667562 width=152) diff --git a/ql/src/test/results/clientpositive/perf/tez/query5.q.out b/ql/src/test/results/clientpositive/perf/tez/query5.q.out index ff197d2871..eb4a8cfd4f 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query5.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query5.q.out @@ -294,7 +294,7 @@ Stage-0 Reduce Output Operator [RS_86] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_85] (rows=2273797803 width=110) - 
Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_49] (rows=191657181 width=132) Output:["_col0","_col1","_col2","_col3","_col4"] Group By Operator [GBY_48] (rows=191657181 width=132) @@ -326,7 +326,7 @@ Stage-0 Select Operator [SEL_10] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_120] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-08-18 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-08-18 00:00:00.0' and d_date_sk is not null) TableScan [TS_8] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] <-Union 19 [SIMPLE_EDGE] @@ -352,7 +352,7 @@ Stage-0 Reduce Output Operator [RS_86] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_85] (rows=2273797803 width=110) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_82] (rows=182955399 width=135) Output:["_col0","_col1","_col2","_col3","_col4"] Group By Operator [GBY_81] (rows=182955399 width=135) @@ -421,7 +421,7 @@ Stage-0 Reduce Output Operator [RS_86] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_85] (rows=2273797803 width=110) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_24] (rows=383320021 width=87) Output:["_col0","_col1","_col2","_col3","_col4"] Group By Operator [GBY_23] (rows=383320021 width=87) diff --git a/ql/src/test/results/clientpositive/perf/tez/query6.q.out b/ql/src/test/results/clientpositive/perf/tez/query6.q.out index f1e47581b4..1cd69f755a 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query6.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query6.q.out @@ -79,7 +79,7 @@ Stage-0 <-Reducer 6 [SIMPLE_EDGE] SHUFFLE [RS_73] Filter Operator [FIL_71] (rows=127775039 width=88) - predicate:(_col1 >= 10) + predicate:(_col1 >= 10L) Group By Operator [GBY_70] (rows=383325119 width=88) Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0 <-Reducer 5 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query67.q.out b/ql/src/test/results/clientpositive/perf/tez/query67.q.out index 86c4d7ce14..113a4431c5 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query67.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query67.q.out @@ -125,7 +125,7 @@ Stage-0 SHUFFLE [RS_24] PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Group By Operator [GBY_23] (rows=6899852151 width=88) - Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"],aggregations:["sum(_col8)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"],aggregations:["sum(_col8)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, 0L Select Operator [SEL_21] (rows=766650239 width=88) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"] Merge Join Operator [MERGEJOIN_54] (rows=766650239 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query70.q.out b/ql/src/test/results/clientpositive/perf/tez/query70.q.out index d5ad6e5fcc..2b98ee8c4e 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query70.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query70.q.out @@ -114,7 +114,7 @@ Stage-0 SHUFFLE [RS_49] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_48] (rows=2299950717 width=88) - Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)"],keys:_col0, _col1, 0L Select Operator [SEL_46] (rows=766650239 width=88) Output:["_col0","_col1","_col2"] Merge Join Operator [MERGEJOIN_89] (rows=766650239 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query72.q.out b/ql/src/test/results/clientpositive/perf/tez/query72.q.out index acab54f8b1..0f953f3a66 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query72.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query72.q.out @@ -135,7 +135,7 @@ Stage-0 Select Operator [SEL_45] (rows=170063874 width=135) Output:["_col3","_col8","_col10","_col11","_col14","_col20"] Filter Operator [FIL_44] (rows=170063874 width=135) - predicate:(UDFToDouble(_col20) > (UDFToDouble(_col9) + 5.0)) + predicate:(UDFToDouble(_col20) > (UDFToDouble(_col9) + 5.0D)) Merge Join Operator [MERGEJOIN_138] (rows=510191624 width=135) Conds:RS_41._col1=RS_42._col0(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col16","_col18","_col20"] <-Map 21 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query73.q.out b/ql/src/test/results/clientpositive/perf/tez/query73.q.out index f666e295d2..9d6312fe3e 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query73.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query73.q.out @@ -121,7 +121,7 @@ Stage-0 Select Operator [SEL_11] (rows=1200 width=107) Output:["_col0"] Filter Operator [FIL_55] (rows=1200 width=107) - predicate:(((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.0)) ELSE (null) END and hd_demo_sk is not null) + predicate:(((hd_buy_potential = '>10000') or (hd_buy_potential = 'unknown')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.0D)) ELSE (null) END and hd_demo_sk is not null) TableScan [TS_9] (rows=7200 width=107) default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_buy_potential","hd_dep_count","hd_vehicle_count"] <-Reducer 5 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query77.q.out b/ql/src/test/results/clientpositive/perf/tez/query77.q.out index d10e2266b3..8f64a29843 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query77.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query77.q.out @@ -257,7 +257,7 @@ Stage-0 Reduce Output Operator [RS_124] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_123] (rows=1912659936 width=163) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + 
Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_75] (rows=158394413 width=360) Output:["_col0","_col1","_col2","_col3","_col4"] Merge Join Operator [MERGEJOIN_188] (rows=158394413 width=360) @@ -279,7 +279,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_162] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] <-Map 27 [SIMPLE_EDGE] @@ -318,7 +318,7 @@ Stage-0 Reduce Output Operator [RS_124] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_123] (rows=1912659936 width=163) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_120] (rows=95833780 width=135) Output:["_col0","_col1","_col2","_col3","_col4"] Merge Join Operator [MERGEJOIN_189] (rows=95833780 width=135) @@ -400,7 +400,7 @@ Stage-0 Reduce Output Operator [RS_124] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_123] (rows=1912659936 width=163) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_43] (rows=383325119 width=88) Output:["_col0","_col1","_col2","_col3","_col4"] Merge Join Operator [MERGEJOIN_187] (rows=383325119 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query8.q.out b/ql/src/test/results/clientpositive/perf/tez/query8.q.out index 8660fc9c57..cc38f7f844 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query8.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query8.q.out @@ -267,7 +267,7 @@ Stage-0 Select Operator [SEL_41] (rows=1 width=1014) Output:["_col0"] Filter Operator [FIL_40] (rows=1 width=1014) - predicate:(_col1 = 2) + predicate:(_col1 = 2L) Group By Operator [GBY_39] (rows=6833333 width=1014) Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0 <-Union 9 [SIMPLE_EDGE] @@ -286,7 +286,7 @@ Stage-0 Select Operator [SEL_28] (rows=7333333 width=1014) Output:["_col0"] Filter Operator [FIL_27] (rows=7333333 width=1014) - predicate:(_col1 > 10) + predicate:(_col1 > 10L) Group By Operator [GBY_26] (rows=22000000 width=1014) Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0 <-Reducer 13 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query80.q.out b/ql/src/test/results/clientpositive/perf/tez/query80.q.out index a64cad7389..7c46ef3e3e 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query80.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query80.q.out @@ -233,7 +233,7 @@ Stage-0 Reduce Output Operator [RS_121] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_120] (rows=2435062716 width=108) - 
Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_77] (rows=231905279 width=135) Output:["_col0","_col1","_col2","_col3","_col4"] Group By Operator [GBY_76] (rows=231905279 width=135) @@ -295,7 +295,7 @@ Stage-0 Select Operator [SEL_8] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_187] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-08-04 00:00:00.0 AND 1998-09-03 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00.0' AND TIMESTAMP'1998-09-03 00:00:00.0' and d_date_sk is not null) TableScan [TS_6] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] <-Reducer 27 [SIMPLE_EDGE] @@ -325,7 +325,7 @@ Stage-0 Reduce Output Operator [RS_121] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_120] (rows=2435062716 width=108) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_117] (rows=115958879 width=135) Output:["_col0","_col1","_col2","_col3","_col4"] Group By Operator [GBY_116] (rows=115958879 width=135) @@ -402,7 +402,7 @@ Stage-0 Reduce Output Operator [RS_121] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_120] (rows=2435062716 width=108) - Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L Select Operator [SEL_38] (rows=463823414 width=88) Output:["_col0","_col1","_col2","_col3","_col4"] Group By Operator [GBY_37] (rows=463823414 width=88) diff --git a/ql/src/test/results/clientpositive/perf/tez/query82.q.out b/ql/src/test/results/clientpositive/perf/tez/query82.q.out index 037c2ffc2c..abd8ca5b3e 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query82.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query82.q.out @@ -97,7 +97,7 @@ Stage-0 Select Operator [SEL_11] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_42] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 2002-05-30 00:00:00.0 AND 2002-07-29 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2002-05-30 00:00:00.0' AND TIMESTAMP'2002-07-29 00:00:00.0' and d_date_sk is not null) TableScan [TS_9] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query86.q.out b/ql/src/test/results/clientpositive/perf/tez/query86.q.out index be7e858e22..cd0d413509 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query86.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query86.q.out @@ -86,7 +86,7 @@ Stage-0 SHUFFLE [RS_18] PartitionCols:_col0, _col1, _col2 Group By Operator [GBY_17] (rows=522729705 width=135) - Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)"],keys:_col0, _col1, 0 + Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)"],keys:_col0, _col1, 
0L Select Operator [SEL_15] (rows=174243235 width=135) Output:["_col0","_col1","_col2"] Merge Join Operator [MERGEJOIN_40] (rows=174243235 width=135) diff --git a/ql/src/test/results/clientpositive/perf/tez/query87.q.out b/ql/src/test/results/clientpositive/perf/tez/query87.q.out index 8430e8ad60..19f0e0028e 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query87.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query87.q.out @@ -70,7 +70,7 @@ Stage-0 Output:["_col0"],aggregations:["count()"] Select Operator [SEL_98] (rows=4537552 width=129) Filter Operator [FIL_97] (rows=4537552 width=129) - predicate:(((_col3 * 2) = _col4) and (_col3 > 0)) + predicate:(((_col3 * 2) = _col4) and (_col3 > 0L)) Select Operator [SEL_129] (rows=27225312 width=129) Output:["_col3","_col4"] Group By Operator [GBY_96] (rows=27225312 width=129) @@ -144,7 +144,7 @@ Stage-0 Select Operator [SEL_59] (rows=21779634 width=103) Output:["_col0","_col1","_col2"] Filter Operator [FIL_58] (rows=21779634 width=103) - predicate:(((_col3 * 2) = _col4) and (_col3 > 0)) + predicate:(((_col3 * 2) = _col4) and (_col3 > 0L)) Group By Operator [GBY_57] (rows=130677808 width=103) Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2 <-Union 5 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query92.q.out b/ql/src/test/results/clientpositive/perf/tez/query92.q.out index fbb4eb3a55..6009fdf171 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query92.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query92.q.out @@ -112,7 +112,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_57] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1998-03-18 00:00:00.0 AND 1998-06-16 01:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00.0' AND TIMESTAMP'1998-06-16 01:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] <-Reducer 7 [ONE_TO_ONE_EDGE] diff --git a/ql/src/test/results/clientpositive/perf/tez/query94.q.out b/ql/src/test/results/clientpositive/perf/tez/query94.q.out index e18d45b15f..ed7d42b7e1 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query94.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query94.q.out @@ -182,7 +182,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_68] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1999-05-01 00:00:00.0 AND 1999-06-30 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1999-05-01 00:00:00.0' AND TIMESTAMP'1999-06-30 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query95.q.out b/ql/src/test/results/clientpositive/perf/tez/query95.q.out index 15920d9751..46f9ae2a7d 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query95.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query95.q.out @@ -225,7 +225,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_107] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 1999-05-01 00:00:00.0 AND 1999-06-30 00:00:00.0 and d_date_sk 
is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1999-05-01 00:00:00.0' AND TIMESTAMP'1999-06-30 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/perf/tez/query98.q.out b/ql/src/test/results/clientpositive/perf/tez/query98.q.out index da534cdcd3..19bb49cca2 100644 --- a/ql/src/test/results/clientpositive/perf/tez/query98.q.out +++ b/ql/src/test/results/clientpositive/perf/tez/query98.q.out @@ -128,7 +128,7 @@ Stage-0 Select Operator [SEL_5] (rows=8116 width=1119) Output:["_col0"] Filter Operator [FIL_35] (rows=8116 width=1119) - predicate:(CAST( d_date AS TIMESTAMP) BETWEEN 2001-01-12 00:00:00.0 AND 2001-02-11 00:00:00.0 and d_date_sk is not null) + predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-01-12 00:00:00.0' AND TIMESTAMP'2001-02-11 00:00:00.0' and d_date_sk is not null) TableScan [TS_3] (rows=73049 width=1119) default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"] diff --git a/ql/src/test/results/clientpositive/ppd2.q.out b/ql/src/test/results/clientpositive/ppd2.q.out index 43119be016..ab5a37fcd4 100644 --- a/ql/src/test/results/clientpositive/ppd2.q.out +++ b/ql/src/test/results/clientpositive/ppd2.q.out @@ -61,7 +61,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -451,7 +451,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/ppd_gby.q.out b/ql/src/test/results/clientpositive/ppd_gby.q.out index c21e5a3435..14981770bf 100644 --- a/ql/src/test/results/clientpositive/ppd_gby.q.out +++ b/ql/src/test/results/clientpositive/ppd_gby.q.out @@ -44,7 +44,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col0 < 'val_400') or (_col1 > 30)) (type: boolean) + predicate: ((_col0 < 'val_400') or (_col1 > 30L)) (type: boolean) Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -253,7 +253,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col0 < 'val_400') or (_col1 > 30)) (type: boolean) + predicate: ((_col0 < 'val_400') or (_col1 > 30L)) (type: boolean) Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/ppd_gby2.q.out b/ql/src/test/results/clientpositive/ppd_gby2.q.out index c65e5c9a4a..201f4a7100 100644 --- a/ql/src/test/results/clientpositive/ppd_gby2.q.out +++ b/ql/src/test/results/clientpositive/ppd_gby2.q.out @@ -47,7 +47,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 83 Data size: 
881 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col0 < 'val_400') or (_col1 > 30)) (type: boolean) + predicate: ((_col0 < 'val_400') or (_col1 > 30L)) (type: boolean) Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(_col0) @@ -167,7 +167,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col0 < 'val_400') or (_col1 > 30)) (type: boolean) + predicate: ((_col0 < 'val_400') or (_col1 > 30L)) (type: boolean) Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: max(_col0) diff --git a/ql/src/test/results/clientpositive/ppd_join2.q.out b/ql/src/test/results/clientpositive/ppd_join2.q.out index 146478a513..3ffd65fd45 100644 --- a/ql/src/test/results/clientpositive/ppd_join2.q.out +++ b/ql/src/test/results/clientpositive/ppd_join2.q.out @@ -96,7 +96,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean) + predicate: ((key <> '306') and (sqrt(key) <> 13.0D) and value is not null) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -1776,7 +1776,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean) + predicate: ((key <> '306') and (sqrt(key) <> 13.0D) and value is not null) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) diff --git a/ql/src/test/results/clientpositive/ppd_join_filter.q.out b/ql/src/test/results/clientpositive/ppd_join_filter.q.out index 20d308e139..dd916bcdd4 100644 --- a/ql/src/test/results/clientpositive/ppd_join_filter.q.out +++ b/ql/src/test/results/clientpositive/ppd_join_filter.q.out @@ -117,10 +117,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -446,10 +446,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 
2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -775,10 +775,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1104,10 +1104,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out index 5a8fb32515..9ee9a19ee3 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out @@ -28,7 +28,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -44,7 +44,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -135,7 +135,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -151,7 +151,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column 
stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out index 9b6eb6dd45..1eab7bd74f 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out @@ -34,7 +34,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -50,7 +50,7 @@ STAGE PLANS: alias: c Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -65,7 +65,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -398,7 +398,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -414,7 +414,7 @@ STAGE PLANS: alias: c Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -429,7 +429,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/ppd_random.q.out b/ql/src/test/results/clientpositive/ppd_random.q.out index 69c4ed101a..ae2a917c72 100644 --- 
a/ql/src/test/results/clientpositive/ppd_random.q.out
+++ b/ql/src/test/results/clientpositive/ppd_random.q.out
@@ -65,7 +65,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col2
 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (rand() > 0.5) (type: boolean)
+ predicate: (rand() > 0.5D) (type: boolean)
 Statistics: Num rows: 60 Data size: 639 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string), _col2 (type: string)
@@ -152,7 +152,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col2
 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (rand() > 0.5) (type: boolean)
+ predicate: (rand() > 0.5D) (type: boolean)
 Statistics: Num rows: 60 Data size: 639 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string), _col2 (type: string)
diff --git a/ql/src/test/results/clientpositive/ppd_udf_col.q.out b/ql/src/test/results/clientpositive/ppd_udf_col.q.out
index 9a6f62c475..322662ca85 100644
--- a/ql/src/test/results/clientpositive/ppd_udf_col.q.out
+++ b/ql/src/test/results/clientpositive/ppd_udf_col.q.out
@@ -20,7 +20,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 100.0) and (rand() <= 0.1)) (type: boolean)
+ predicate: ((UDFToDouble(key) = 100.0D) and (rand() <= 0.1D)) (type: boolean)
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), rand() (type: double)
@@ -66,7 +66,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 100.0) and (rand() <= 0.1) and (rand() > 0.1)) (type: boolean)
+ predicate: ((UDFToDouble(key) = 100.0D) and (rand() <= 0.1D) and (rand() > 0.1D)) (type: boolean)
 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), rand() (type: double)
@@ -153,10 +153,10 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (((UDFToDouble(value) * 10.0) <= 200.0) and (UDFToDouble(key) = 100.0)) (type: boolean)
+ predicate: (((UDFToDouble(value) * 10.0D) <= 200.0D) and (UDFToDouble(key) = 100.0D)) (type: boolean)
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: key (type: string), rand() (type: double), (UDFToDouble(value) * 10.0) (type: double)
+ expressions: key (type: string), rand() (type: double), (UDFToDouble(value) * 10.0D) (type: double)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -195,7 +195,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 100.0) and (rand() <= 0.1)) (type: boolean)
+ predicate: ((UDFToDouble(key) = 100.0D) and (rand() <= 0.1D)) (type: boolean)
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), rand() (type: double)
@@ -241,7 +241,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 100.0) and (rand() <= 0.1) and (rand() > 0.1)) (type: boolean)
+ predicate: ((UDFToDouble(key) = 100.0D) and (rand() <= 0.1D) and (rand() > 0.1D)) (type: boolean)
 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), rand() (type: double)
@@ -328,10 +328,10 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (((UDFToDouble(value) * 10.0) <= 200.0) and (UDFToDouble(key) = 100.0)) (type: boolean)
+ predicate: (((UDFToDouble(value) * 10.0D) <= 200.0D) and (UDFToDouble(key) = 100.0D)) (type: boolean)
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: key (type: string), rand() (type: double), (UDFToDouble(value) * 10.0) (type: double)
+ expressions: key (type: string), rand() (type: double), (UDFToDouble(value) * 10.0D) (type: double)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/ppd_udtf.q.out b/ql/src/test/results/clientpositive/ppd_udtf.q.out
index d00852674a..409e982fb1 100644
--- a/ql/src/test/results/clientpositive/ppd_udtf.q.out
+++ b/ql/src/test/results/clientpositive/ppd_udtf.q.out
@@ -24,7 +24,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 400.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 400.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: array(key,value) (type: array<string>)
@@ -34,7 +34,7 @@ STAGE PLANS:
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 function name: explode
 Filter Operator
- predicate: (UDFToDouble(col) < 450.0) (type: boolean)
+ predicate: (UDFToDouble(col) < 450.0D) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: col (type: string)
diff --git a/ql/src/test/results/clientpositive/ppd_vc.q.out b/ql/src/test/results/clientpositive/ppd_vc.q.out
index 1569cf62b7..58b36cf6eb 100644
--- a/ql/src/test/results/clientpositive/ppd_vc.q.out
+++ b/ql/src/test/results/clientpositive/ppd_vc.q.out
@@ -18,7 +18,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: (BLOCK__OFFSET__INSIDE__FILE < 100) (type: boolean)
+ predicate: (BLOCK__OFFSET__INSIDE__FILE < 100L) (type: boolean)
 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
@@ -350,7 +350,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((BLOCK__OFFSET__INSIDE__FILE < 50) and key is not null) (type: boolean)
+ predicate: ((BLOCK__OFFSET__INSIDE__FILE < 50L) and key is not null) (type: boolean)
 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
diff --git a/ql/src/test/results/clientpositive/ppd_windowing1.q.out b/ql/src/test/results/clientpositive/ppd_windowing1.q.out
index 1adc8fc0e9..85843b3e08 100644
--- a/ql/src/test/results/clientpositive/ppd_windowing1.q.out
+++ b/ql/src/test/results/clientpositive/ppd_windowing1.q.out
@@ -80,7 +80,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 2.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 2.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Reduce Output Operator
 key expressions: key (type: string)
@@ -410,7 +410,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 2.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 2.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Reduce Output Operator
 key expressions: key (type: string), value (type: string)
@@ -788,7 +788,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 2.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 2.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Reduce Output Operator
 key expressions: key (type: string), value (type: string)
@@ -1240,7 +1240,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 2.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 2.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Reduce Output Operator
 key expressions: key (type: string), value (type: string)
@@ -1859,7 +1859,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(_col0) + UDFToDouble(_col1)) > 2.0) (type: boolean)
+ predicate: ((UDFToDouble(_col0) + UDFToDouble(_col1)) > 2.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
 compressed: false
@@ -1926,7 +1926,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(_col0) + UDFToDouble(_col1)) > 2.0) (type: boolean)
+ predicate: ((UDFToDouble(_col0) + UDFToDouble(_col1)) > 2.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
 compressed: false
diff --git a/ql/src/test/results/clientpositive/ppr_pushdown3.q.out b/ql/src/test/results/clientpositive/ppr_pushdown3.q.out
index 0d2038b727..6b37990001 100644
--- a/ql/src/test/results/clientpositive/ppr_pushdown3.q.out
+++ b/ql/src/test/results/clientpositive/ppr_pushdown3.q.out
@@ -14,7 +14,7 @@ STAGE PLANS:
 alias: srcpart
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
diff --git a/ql/src/test/results/clientpositive/quote1.q.out b/ql/src/test/results/clientpositive/quote1.q.out
index 3ecc12a4bc..26cfd49e3d 100644
--- a/ql/src/test/results/clientpositive/quote1.q.out
+++ b/ql/src/test/results/clientpositive/quote1.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 300.0) and (UDFToDouble(key) >= 200.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 300.0D) and (UDFToDouble(key) >= 200.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: UDFToInteger(key) (type: int), value (type: string)
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
index b3fa9084d2..b5abb3d2b8 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
@@ -16,7 +16,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: (rand(1) < 0.1) (type: boolean)
+ predicate: (rand(1) < 0.1D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
index 0e8ec13893..e0bf5a29ac 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
@@ -34,7 +34,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: (rand(1) < 0.1) (type: boolean)
+ predicate: (rand(1) < 0.1D) (type: boolean)
 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
index 96e8e45b55..ed9635fc95 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
@@ -63,7 +63,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0) and (rand(1) < 0.1)) (type: boolean)
+ predicate: ((UDFToDouble(key) <= 50.0D) and (UDFToDouble(key) >= 10.0D) and (rand(1) < 0.1D)) (type: boolean)
 Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
@@ -151,7 +151,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) <= 50.0D) and (UDFToDouble(key) >= 10.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
diff --git a/ql/src/test/results/clientpositive/rcfile_null_value.q.out b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
index 2d2bef9be5..21b8d84ac0 100644
--- a/ql/src/test/results/clientpositive/rcfile_null_value.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
@@ -101,7 +101,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -117,7 +117,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/regex_col.q.out b/ql/src/test/results/clientpositive/regex_col.q.out
index 70ec3636d6..5c39748be1 100644
--- a/ql/src/test/results/clientpositive/regex_col.q.out
+++ b/ql/src/test/results/clientpositive/regex_col.q.out
@@ -165,7 +165,7 @@ STAGE PLANS:
 alias: a
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 103.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 103.0D) (type: boolean)
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), ds (type: string), hr (type: string)
@@ -180,7 +180,7 @@ STAGE PLANS:
 alias: b
 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 103.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 103.0D) (type: boolean)
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), ds (type: string), hr (type: string)
diff --git a/ql/src/test/results/clientpositive/results_cache_2.q.out b/ql/src/test/results/clientpositive/results_cache_2.q.out
index ab1b0de9c0..40dd71d772 100644
--- a/ql/src/test/results/clientpositive/results_cache_2.q.out
+++ b/ql/src/test/results/clientpositive/results_cache_2.q.out
@@ -16,7 +16,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -133,13 +133,13 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: 2012-01-01 01:02:03.0 (type: timestamp)
+ keys: TIMESTAMP'2012-01-01 01:02:03.0' (type: timestamp)
 mode: hash
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
@@ -157,7 +157,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: 2012-01-01 01:02:03.0 (type: timestamp), _col1 (type: bigint)
+ expressions: TIMESTAMP'2012-01-01 01:02:03.0' (type: timestamp), _col1 (type: bigint)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/results_cache_capacity.q.out b/ql/src/test/results/clientpositive/results_cache_capacity.q.out
index 695d47d010..3951cc2aec 100644
--- a/ql/src/test/results/clientpositive/results_cache_capacity.q.out
+++ b/ql/src/test/results/clientpositive/results_cache_capacity.q.out
@@ -134,7 +134,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 2.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 2.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
@@ -201,7 +201,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
diff --git a/ql/src/test/results/clientpositive/results_cache_temptable.q.out b/ql/src/test/results/clientpositive/results_cache_temptable.q.out
index 5350dba954..cc96a0c1f3 100644
--- a/ql/src/test/results/clientpositive/results_cache_temptable.q.out
+++ b/ql/src/test/results/clientpositive/results_cache_temptable.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
 alias: tmptab
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
@@ -167,7 +167,7 @@ STAGE PLANS:
 alias: rct
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 0.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 0.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
@@ -258,7 +258,7 @@ STAGE PLANS:
 alias: rct_part
 Statistics: Num rows: 500 Data size: 10812 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 0.0) and (ds = '2008-04-08')) (type: boolean)
+ predicate: ((UDFToDouble(key) = 0.0D) and (ds = '2008-04-08')) (type: boolean)
 Statistics: Num rows: 125 Data size: 2703 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 Statistics: Num rows: 125 Data size: 2703 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out
index b1fa29cb10..b59930072c 100644
--- a/ql/src/test/results/clientpositive/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -51,7 +51,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -324,7 +324,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -345,7 +345,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -622,7 +622,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -643,7 +643,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -916,7 +916,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -937,7 +937,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/select_unquote_and.q.out b/ql/src/test/results/clientpositive/select_unquote_and.q.out
index adf4794ecf..15ab5e5f7a 100644
--- a/ql/src/test/results/clientpositive/select_unquote_and.q.out
+++ b/ql/src/test/results/clientpositive/select_unquote_and.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
 alias: npe_test
 Statistics: Num rows: 498 Data size: 5290 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(ds) < 1985.0) and (UDFToDouble(ds) > 1970.0)) (type: boolean)
+ predicate: ((UDFToDouble(ds) < 1985.0D) and (UDFToDouble(ds) > 1970.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), ds (type: string)
diff --git a/ql/src/test/results/clientpositive/select_unquote_not.q.out b/ql/src/test/results/clientpositive/select_unquote_not.q.out
index 1d2bc5fa88..de9e4006e0 100644
--- a/ql/src/test/results/clientpositive/select_unquote_not.q.out
+++ b/ql/src/test/results/clientpositive/select_unquote_not.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
 alias: npe_test
 Statistics: Num rows: 498 Data size: 5290 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(ds) >= 1970.0) (type: boolean)
+ predicate: (UDFToDouble(ds) >= 1970.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), ds (type: string)
diff --git a/ql/src/test/results/clientpositive/select_unquote_or.q.out b/ql/src/test/results/clientpositive/select_unquote_or.q.out
index 2a38996f82..c50929cfe0 100644
--- a/ql/src/test/results/clientpositive/select_unquote_or.q.out
+++ b/ql/src/test/results/clientpositive/select_unquote_or.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
 alias: npe_test
 Statistics: Num rows: 498 Data size: 5290 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(ds) < 1985.0) or (UDFToDouble(ds) > 1970.0)) (type: boolean)
+ predicate: ((UDFToDouble(ds) < 1985.0D) or (UDFToDouble(ds) > 1970.0D)) (type: boolean)
 Statistics: Num rows: 332 Data size: 3526 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), ds (type: string)
diff --git a/ql/src/test/results/clientpositive/semijoin4.q.out b/ql/src/test/results/clientpositive/semijoin4.q.out
index 46d1d3f37b..0497a2eaab 100644
--- a/ql/src/test/results/clientpositive/semijoin4.q.out
+++ b/ql/src/test/results/clientpositive/semijoin4.q.out
@@ -71,7 +71,7 @@ STAGE PLANS:
 alias: t1
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Filter Operator
- predicate: ((-92 = tinyint_col_46) and bigint_col_13 is not null and decimal1309_col_65 is not null) (type: boolean)
+ predicate: ((-92Y = tinyint_col_46) and bigint_col_13 is not null and decimal1309_col_65 is not null) (type: boolean)
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Select Operator
 expressions: bigint_col_13 (type: bigint), smallint_col_24 (type: smallint), double_col_60 (type: double), decimal1309_col_65 (type: decimal(13,9))
@@ -87,7 +87,7 @@ STAGE PLANS:
 alias: t2
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Filter Operator
- predicate: ((tinyint_col_21 = -92) and decimal2709_col_9 is not null and tinyint_col_18 is not null) (type: boolean)
+ predicate: ((tinyint_col_21 = -92Y) and decimal2709_col_9 is not null and tinyint_col_18 is not null) (type: boolean)
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Select Operator
 expressions: decimal2709_col_9 (type: decimal(27,9)), tinyint_col_18 (type: tinyint)
diff --git a/ql/src/test/results/clientpositive/set_processor_namespaces.q.out b/ql/src/test/results/clientpositive/set_processor_namespaces.q.out
index c1c82704e9..ca1d82aac9 100644
--- a/ql/src/test/results/clientpositive/set_processor_namespaces.q.out
+++ b/ql/src/test/results/clientpositive/set_processor_namespaces.q.out
@@ -19,7 +19,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 5.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 5.0D) (type: boolean)
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/setop_subq.q.out b/ql/src/test/results/clientpositive/setop_subq.q.out
index ac20c4c422..120f0e54ca 100644
--- a/ql/src/test/results/clientpositive/setop_subq.q.out
+++ b/ql/src/test/results/clientpositive/setop_subq.q.out
@@ -156,7 +156,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col1 = 2) (type: boolean)
+ predicate: (_col1 = 2L) (type: boolean)
 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string)
@@ -300,7 +300,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col1 = 2) (type: boolean)
+ predicate: (_col1 = 2L) (type: boolean)
 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string)
@@ -444,7 +444,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col1 = 2) (type: boolean)
+ predicate: (_col1 = 2L) (type: boolean)
 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join11.q.out b/ql/src/test/results/clientpositive/spark/auto_join11.q.out
index 4177b9a47a..4398e0c59b 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join11.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join11.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join12.q.out b/ql/src/test/results/clientpositive/spark/auto_join12.q.out
index c56e8da4a9..309ad55e25 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join12.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join12.q.out
@@ -36,7 +36,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 80.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 80.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -81,7 +81,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 80.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join13.q.out b/ql/src/test/results/clientpositive/spark/auto_join13.q.out
index 9f86f6cb56..e10855dd2b 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join13.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join13.q.out
@@ -36,7 +36,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 200.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 200.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -79,7 +79,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join14.q.out b/ql/src/test/results/clientpositive/spark/auto_join14.q.out
index 356cc85703..510988f291 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join14.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join14.q.out
@@ -31,7 +31,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
 alias: srcpart
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join16.q.out b/ql/src/test/results/clientpositive/spark/auto_join16.q.out
index c9ccc9bcce..e98d217925 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join16.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
 alias: tab
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
 alias: a
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join27.q.out b/ql/src/test/results/clientpositive/spark/auto_join27.q.out
index e49335e31c..1b23ff5ba5 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join27.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join27.q.out
@@ -41,7 +41,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 200.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 200.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -58,7 +58,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 200.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 200.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 keys: key (type: string), value (type: string)
@@ -76,7 +76,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 200.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 200.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join4.q.out b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
index 0502e62354..b72f499e44 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -76,7 +76,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join5.q.out b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
index ae83ebc59c..099aa6eed8 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -76,7 +76,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join6.q.out b/ql/src/test/results/clientpositive/spark/auto_join6.q.out
index b1ebb16e54..67bdc58ba8 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join6.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join6.q.out
@@ -54,7 +54,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -72,7 +72,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join7.q.out b/ql/src/test/results/clientpositive/spark/auto_join7.q.out
index b4257507b6..a410966016 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join7.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join7.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -82,7 +82,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -100,7 +100,7 @@ STAGE PLANS:
 alias: src3
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 20.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join8.q.out b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
index a789e8cc0f..32331cfd11 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -76,7 +76,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
index 2b1b600ae1..24f7ec2f8e 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out
@@ -337,7 +337,7 @@ STAGE PLANS:
 alias: a
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) > 100.0) and value is not null) (type: boolean)
+ predicate: ((UDFToDouble(key) > 100.0D) and value is not null) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -355,7 +355,7 @@ STAGE PLANS:
 alias: b
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
index 48574b94c8..a9bc746aee 100644
--- a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
+++ b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
@@ -493,7 +493,7 @@ STAGE PLANS:
 alias: t1
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(val) = 3.0) and key is not null) (type: boolean)
+ predicate: ((UDFToDouble(val) = 3.0D) and key is not null) (type: boolean)
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), val (type: string)
@@ -511,7 +511,7 @@ STAGE PLANS:
 alias: t2
 Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(val) = 3.0) and key is not null) (type: boolean)
+ predicate: ((UDFToDouble(val) = 3.0D) and key is not null) (type: boolean)
 Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), val (type: string)
@@ -589,7 +589,7 @@ STAGE PLANS:
 alias: t1
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 5.0) and val is not null) (type: boolean)
+ predicate: ((UDFToDouble(key) = 5.0D) and val is not null) (type: boolean)
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: val (type: string)
@@ -606,7 +606,7 @@ STAGE PLANS:
 alias: t2
 Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) = 6.0) and val is not null) (type: boolean)
+ predicate: ((UDFToDouble(key) = 6.0D) and val is not null) (type: boolean)
 Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: val (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
index d3439e42c2..cf78358e27 100644
--- a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
@@ -527,7 +527,7 @@ STAGE PLANS:
 alias: s2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -561,7 +561,7 @@ STAGE PLANS:
 alias: s4
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -950,10 +950,10 @@ STAGE PLANS:
 outputColumnNames: _col1, _col2, _col3, _col4
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Filter Operator
- predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean)
+ predicate: CASE WHEN ((_col4 = 0.0D)) THEN (false) ELSE (((_col3 / _col4) > 1.0D)) END (type: boolean)
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Select Operator
- expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
+ expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0D)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Reduce Output Operator
@@ -1029,10 +1029,10 @@ STAGE PLANS:
 outputColumnNames: _col1, _col2, _col3, _col4
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Filter Operator
- predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean)
+ predicate: CASE WHEN ((_col4 = 0.0D)) THEN (false) ELSE (((_col3 / _col4) > 1.0D)) END (type: boolean)
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Select Operator
- expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
+ expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0D)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
 Reduce Output Operator
diff --git a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
index 71ccea5507..92bb775d50 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
@@ -42,7 +42,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -107,7 +107,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -198,7 +198,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint)
+ keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -288,7 +288,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(DISTINCT val)
- keys: key (type: string), 0 (type: bigint), val (type: string)
+ keys: key (type: string), 0L (type: bigint), val (type: string)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -367,7 +367,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -473,7 +473,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(DISTINCT val)
- keys: key (type: string), 0 (type: bigint), val (type: string)
+ keys: key (type: string), 0L (type: bigint), val (type: string)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -591,7 +591,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(1)
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
@@ -612,7 +612,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: sum(1)
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 4 Data size: 1200 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
index bcc512be09..ed2ffbe471 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
@@ -428,7 +428,7 @@ STAGE PLANS:
 alias: b
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) <= 20.0) (type: boolean)
+ predicate: (UDFToDouble(key) <= 20.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
@@ -552,7 +552,7 @@ STAGE PLANS:
 alias: src1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string), substr(value, 5) (type: string)
@@ -574,7 +574,7 @@ STAGE PLANS:
 alias: src2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 keys: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
index cf3721df04..eee15a3fc1 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
@@ -42,7 +42,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
@@ -127,7 +127,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(DISTINCT val)
- keys: key (type: string), 0 (type: bigint), val (type: string)
+ keys: key (type: string), 0L (type: bigint), val (type: string)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -206,7 +206,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count()
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
@@ -306,7 +306,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(DISTINCT val)
- keys: key (type: string), 0 (type: bigint), val (type: string)
+ keys: key (type: string), 0L (type: bigint), val (type: string)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: NONE
@@ -424,7 +424,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: count(1)
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
@@ -445,7 +445,7 @@ STAGE PLANS:
 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: sum(1)
- keys: key (type: string), val (type: string), 0 (type: bigint)
+ keys: key (type: string), val (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1, _col2, _col3
 Statistics: Num rows: 3 Data size: 900 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
index 45f3a978ea..d87ba73098 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
@@ -1235,7 +1235,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Select Operator
- expressions: key (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ expressions: key (type: string), (UDFToDouble(key) + 1.0D) (type: double)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
@@ -4050,7 +4050,7 @@ STAGE PLANS:
 alias: t2
 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 8.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 8.0D) (type: boolean)
 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), val (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
index 199db0c9ff..2fa8c3a228 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
@@ -1274,7 +1274,7 @@ STAGE PLANS:
 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 GatherStats: false
 Select Operator
- expressions: key (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ expressions: key (type: string), (UDFToDouble(key) + 1.0D) (type: double)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
@@ -4199,7 +4199,7 @@ STAGE PLANS:
 alias: t2
 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) = 8.0) (type: boolean)
+ predicate: (UDFToDouble(key) = 8.0D) (type: boolean)
 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), val (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/having.q.out b/ql/src/test/results/clientpositive/spark/having.q.out
index 2876e67f06..84ee703d6e 100644
--- a/ql/src/test/results/clientpositive/spark/having.q.out
+++ b/ql/src/test/results/clientpositive/spark/having.q.out
@@ -47,7 +47,7 @@ STAGE PLANS:
 outputColumnNames: _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col1 > 3) (type: boolean)
+ predicate: (_col1 > 3L) (type: boolean)
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col1 (type: bigint)
@@ -106,7 +106,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) <> 302.0) (type: boolean)
+ predicate: (UDFToDouble(key) <> 302.0D) (type: boolean)
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: max(value)
@@ -751,7 +751,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 300.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 300.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
 aggregations: max(value)
@@ -1240,7 +1240,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col1 >= 4) (type: boolean)
+ predicate: (_col1 >= 4L) (type: boolean)
 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
 compressed: false
diff --git a/ql/src/test/results/clientpositive/spark/join11.q.out b/ql/src/test/results/clientpositive/spark/join11.q.out
index 8e245e918f..aa682bcc72 100644
--- a/ql/src/test/results/clientpositive/spark/join11.q.out
+++ b/ql/src/test/results/clientpositive/spark/join11.q.out
@@ -31,7 +31,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -48,7 +48,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/join12.q.out b/ql/src/test/results/clientpositive/spark/join12.q.out
index efc8fc36bb..f3cfe0c8fc 100644
--- a/ql/src/test/results/clientpositive/spark/join12.q.out
+++ b/ql/src/test/results/clientpositive/spark/join12.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 80.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 80.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -71,7 +71,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 80.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/join13.q.out b/ql/src/test/results/clientpositive/spark/join13.q.out
index 69d64e221d..8c2ce6141b 100644
--- a/ql/src/test/results/clientpositive/spark/join13.q.out
+++ b/ql/src/test/results/clientpositive/spark/join13.q.out
@@ -38,7 +38,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -56,7 +56,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -73,7 +73,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) < 200.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 200.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/join14.q.out b/ql/src/test/results/clientpositive/spark/join14.q.out
index 11edde04be..463b1d54e0 100644
--- a/ql/src/test/results/clientpositive/spark/join14.q.out
+++ b/ql/src/test/results/clientpositive/spark/join14.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
 alias: src
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string)
@@ -49,7 +49,7 @@ STAGE PLANS:
 alias: srcpart
 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+ predicate: (UDFToDouble(key) > 100.0D) (type: boolean)
 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/join16.q.out b/ql/src/test/results/clientpositive/spark/join16.q.out
index b3ea6d45f7..cae2fb7f38 100644
--- a/ql/src/test/results/clientpositive/spark/join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/join16.q.out
@@ -19,7 +19,7 @@ STAGE PLANS:
 alias: a
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -36,7 +36,7 @@ STAGE PLANS:
 alias: tab
 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+ predicate: ((UDFToDouble(key) > 20.0D) and (UDFToDouble(value) < 200.0D)) (type: boolean)
 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
diff --git a/ql/src/test/results/clientpositive/spark/join34.q.out b/ql/src/test/results/clientpositive/spark/join34.q.out
index 2c9fb99b9e..c2bdb9128d 100644
--- a/ql/src/test/results/clientpositive/spark/join34.q.out
+++ b/ql/src/test/results/clientpositive/spark/join34.q.out
@@ -46,7 +46,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: (UDFToDouble(key) < 20.0) (type: boolean)
+ predicate: (UDFToDouble(key) < 20.0D) (type: boolean)
 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: key (type: string), value (type: string)
@@ -120,7 +120,7 @@ STAGE PLANS:
 GatherStats: false
 Filter Operator
 isSamplingPred: false
- predicate: (UDFToDouble(key) > 100.0) (type: boolean)
+
predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -194,7 +194,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean) + predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean) Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out b/ql/src/test/results/clientpositive/spark/join35.q.out index 86ccc5adc1..eec056d890 100644 --- a/ql/src/test/results/clientpositive/spark/join35.q.out +++ b/ql/src/test/results/clientpositive/spark/join35.q.out @@ -48,7 +48,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 20.0) (type: boolean) + predicate: (UDFToDouble(key) < 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -124,7 +124,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -200,7 +200,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (((UDFToDouble(key) < 20.0) or (UDFToDouble(key) > 100.0)) and key is not null) (type: boolean) + predicate: (((UDFToDouble(key) < 20.0D) or (UDFToDouble(key) > 100.0D)) and key is not null) (type: boolean) Statistics: Num rows: 16 Data size: 122 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join38.q.out b/ql/src/test/results/clientpositive/spark/join38.q.out index f4f31c0eb0..1bcf759d73 100644 --- a/ql/src/test/results/clientpositive/spark/join38.q.out +++ b/ql/src/test/results/clientpositive/spark/join38.q.out @@ -64,7 +64,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 2 Data size: 126 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(col11) = 111.0) (type: boolean) + predicate: (UDFToDouble(col11) = 111.0D) (type: boolean) Statistics: Num rows: 1 Data size: 63 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: col5 (type: string), col11 (type: string) @@ -89,7 +89,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 111.0) (type: boolean) + predicate: (UDFToDouble(key) = 111.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join39.q.out b/ql/src/test/results/clientpositive/spark/join39.q.out index c094dd9d35..e228165f1e 100644 --- a/ql/src/test/results/clientpositive/spark/join39.q.out +++ b/ql/src/test/results/clientpositive/spark/join39.q.out @@ -33,7 +33,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter 
Operator - predicate: (UDFToDouble(key) <= 100.0) (type: boolean) + predicate: (UDFToDouble(key) <= 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join4.q.out b/ql/src/test/results/clientpositive/spark/join4.q.out index 44ec2e052f..2396fd821c 100644 --- a/ql/src/test/results/clientpositive/spark/join4.q.out +++ b/ql/src/test/results/clientpositive/spark/join4.q.out @@ -54,7 +54,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -72,7 +72,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join40.q.out b/ql/src/test/results/clientpositive/spark/join40.q.out index cd4004275d..03c00a979e 100644 --- a/ql/src/test/results/clientpositive/spark/join40.q.out +++ b/ql/src/test/results/clientpositive/spark/join40.q.out @@ -36,7 +36,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <= 100.0) (type: boolean) + predicate: (UDFToDouble(key) <= 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -3112,7 +3112,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) <= 100.0) (type: boolean) + predicate: (UDFToDouble(key) <= 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join41.q.out b/ql/src/test/results/clientpositive/spark/join41.q.out index 2099b7ce04..a2d2c4916d 100644 --- a/ql/src/test/results/clientpositive/spark/join41.q.out +++ b/ql/src/test/results/clientpositive/spark/join41.q.out @@ -48,7 +48,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 10.0) (type: boolean) + predicate: (UDFToDouble(key) > 10.0D) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -133,7 +133,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 10.0) (type: boolean) + predicate: (UDFToDouble(key) > 10.0D) (type: boolean) Statistics: Num rows: 1 Data size: 7 
Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join5.q.out b/ql/src/test/results/clientpositive/spark/join5.q.out index 37a96d7b29..f9dfbece64 100644 --- a/ql/src/test/results/clientpositive/spark/join5.q.out +++ b/ql/src/test/results/clientpositive/spark/join5.q.out @@ -54,7 +54,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -72,7 +72,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join6.q.out b/ql/src/test/results/clientpositive/spark/join6.q.out index 563c9407bd..3f884cab38 100644 --- a/ql/src/test/results/clientpositive/spark/join6.q.out +++ b/ql/src/test/results/clientpositive/spark/join6.q.out @@ -54,7 +54,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -72,7 +72,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join7.q.out b/ql/src/test/results/clientpositive/spark/join7.q.out index 86838cc216..d43c5cdb84 100644 --- a/ql/src/test/results/clientpositive/spark/join7.q.out +++ b/ql/src/test/results/clientpositive/spark/join7.q.out @@ -64,7 +64,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -82,7 +82,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and 
(UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -100,7 +100,7 @@ STAGE PLANS: alias: src3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 25.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out index e6b7c1f0d8..a6b9c18504 100644 --- a/ql/src/test/results/clientpositive/spark/join8.q.out +++ b/ql/src/test/results/clientpositive/spark/join8.q.out @@ -54,7 +54,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 10.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 10.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -72,7 +72,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/join_vc.q.out b/ql/src/test/results/clientpositive/spark/join_vc.q.out index 50762bdc16..39901eb733 100644 --- a/ql/src/test/results/clientpositive/spark/join_vc.q.out +++ b/ql/src/test/results/clientpositive/spark/join_vc.q.out @@ -162,7 +162,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -179,7 +179,7 @@ STAGE PLANS: alias: t2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint) diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out index a60d8ddbe3..82ba94a52b 100644 --- a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out +++ b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out @@ -187,7 +187,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), 
(UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -279,7 +279,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown2.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown2.q.out index 3bd49b4c77..62c26e6224 100644 --- a/ql/src/test/results/clientpositive/spark/limit_pushdown2.q.out +++ b/ql/src/test/results/clientpositive/spark/limit_pushdown2.q.out @@ -25,7 +25,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -125,7 +125,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -225,7 +225,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -325,7 +325,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -425,7 +425,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -525,7 +525,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + 
expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -626,7 +626,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -738,7 +738,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: key (type: string), value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -850,7 +850,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -961,12 +961,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE @@ -1046,12 +1046,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out index 5c7e63cb31..8b8b334ce4 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part13.q.out @@ -71,7 +71,7 @@ STAGE PLANS: alias: src 
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 20.0) (type: boolean) + predicate: (UDFToDouble(key) < 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), '22' (type: string) @@ -91,7 +91,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 40.0) and (UDFToDouble(key) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 40.0D) and (UDFToDouble(key) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), '33' (type: string) diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out index 0762fbaaf6..9649f73cd5 100644 --- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out @@ -35,7 +35,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -109,7 +109,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -341,7 +341,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -466,7 +466,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -651,7 +651,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -725,7 +725,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: 
Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -957,7 +957,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -1082,7 +1082,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/mapjoin1.q.out b/ql/src/test/results/clientpositive/spark/mapjoin1.q.out index a8ef16479c..74c51bb02d 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin1.q.out @@ -141,7 +141,7 @@ STAGE PLANS: Spark HashTable Sink Operator filter predicates: 0 - 1 {((UDFToDouble(_col0) * 10.0) < 1000.0)} + 1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -166,7 +166,7 @@ STAGE PLANS: Right Outer Join 0 to 1 filter predicates: 0 - 1 {((UDFToDouble(_col0) * 10.0) < 1000.0)} + 1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -241,7 +241,7 @@ STAGE PLANS: Spark HashTable Sink Operator filter predicates: 0 - 1 {(UDFToDouble(_col1.key) > 200.0)} + 1 {(UDFToDouble(_col1.key) > 200.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -266,7 +266,7 @@ STAGE PLANS: Right Outer Join 0 to 1 filter predicates: 0 - 1 {(UDFToDouble(_col1.key) > 200.0)} + 1 {(UDFToDouble(_col1.key) > 200.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -433,7 +433,7 @@ STAGE PLANS: Spark HashTable Sink Operator filter predicates: 0 - 1 {((UDFToDouble(_col0) * 10.0) < 1000.0)} + 1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -458,7 +458,7 @@ STAGE PLANS: Right Outer Join 0 to 1 filter predicates: 0 - 1 {((UDFToDouble(_col0) * 10.0) < 1000.0)} + 1 {((UDFToDouble(_col0) * 10.0D) < 1000.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -533,7 +533,7 @@ STAGE PLANS: Spark HashTable Sink Operator filter predicates: 0 - 1 {(UDFToDouble(_col1.key) > 200.0)} + 1 {(UDFToDouble(_col1.key) > 200.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) @@ -558,7 +558,7 @@ STAGE PLANS: Right Outer Join 0 to 1 filter predicates: 0 - 1 {(UDFToDouble(_col1.key) > 200.0)} + 1 {(UDFToDouble(_col1.key) > 200.0D)} keys: 0 _col0 (type: string) 1 _col0 (type: string) diff --git a/ql/src/test/results/clientpositive/spark/mergejoins.q.out b/ql/src/test/results/clientpositive/spark/mergejoins.q.out index ce03008f23..069a46868e 100644 --- a/ql/src/test/results/clientpositive/spark/mergejoins.q.out +++ b/ql/src/test/results/clientpositive/spark/mergejoins.q.out @@ -265,7 +265,7 @@ STAGE PLANS: Left Outer Join 1 to 2 filter predicates: 0 - 1 {(UDFToDouble(KEY.reducesinkkey0) < 10.0)} + 1 {(UDFToDouble(KEY.reducesinkkey0) < 10.0D)} 2 keys: 0 _col0 (type: string) diff --git 
a/ql/src/test/results/clientpositive/spark/nullgroup.q.out b/ql/src/test/results/clientpositive/spark/nullgroup.q.out index aa99ae34b4..9a98bfb0aa 100644 --- a/ql/src/test/results/clientpositive/spark/nullgroup.q.out +++ b/ql/src/test/results/clientpositive/spark/nullgroup.q.out @@ -21,7 +21,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE @@ -87,7 +87,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE @@ -154,7 +154,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE @@ -226,7 +226,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/spark/nullgroup2.q.out b/ql/src/test/results/clientpositive/spark/nullgroup2.q.out index 1c6c505e6d..392f51c4a4 100644 --- a/ql/src/test/results/clientpositive/spark/nullgroup2.q.out +++ b/ql/src/test/results/clientpositive/spark/nullgroup2.q.out @@ -22,7 +22,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -103,7 +103,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -171,7 +171,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) @@ -245,7 +245,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data 
size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 9999.0) (type: boolean) + predicate: (UDFToDouble(key) > 9999.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/spark/nullgroup4.q.out b/ql/src/test/results/clientpositive/spark/nullgroup4.q.out index 061bd1be59..4aa4061a4a 100644 --- a/ql/src/test/results/clientpositive/spark/nullgroup4.q.out +++ b/ql/src/test/results/clientpositive/spark/nullgroup4.q.out @@ -22,7 +22,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -105,7 +105,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -194,7 +194,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -269,7 +269,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/nullgroup4_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/nullgroup4_multi_distinct.q.out index 7f5bfaab6d..66037c8d50 100644 --- a/ql/src/test/results/clientpositive/spark/nullgroup4_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/nullgroup4_multi_distinct.q.out @@ -21,7 +21,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), substr(value, 5) (type: string) @@ -91,7 +91,7 @@ STAGE PLANS: alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 9999.0) (type: boolean) + predicate: (UDFToDouble(key) = 9999.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), substr(value, 5) (type: string) diff --git a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out index c6f06add8a..73aeb4c850 100644 --- 
a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out @@ -40,10 +40,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) @@ -131,10 +131,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out index 5c3b25e035..f13cdd47f4 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out @@ -40,10 +40,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) @@ -178,10 +178,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: orc_merge5 - filterExpr: (userid <= 13) (type: boolean) + filterExpr: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out index d97ac66af3..7f9ce038a5 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out @@ -42,7 +42,7 @@ STAGE PLANS: alias: orc_merge5 Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (userid <= 13) (type: boolean) + predicate: (userid <= 13L) (type: boolean) Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: userid (type: bigint), string1 (type: string), subtype (type: 
double), decimal1 (type: decimal(10,0)), ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out index 8bfd8b67a6..97cd138661 100644 --- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out @@ -35,7 +35,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -109,7 +109,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -341,7 +341,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -415,7 +415,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out index 1bbc2a0f5e..19e53178c5 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_0.q.out @@ -1642,7 +1642,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0 + _col0) (type: double), _col1 (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) + (-6432.0 + _col0)) (type: double), _col2 (type: double), (- (-6432.0 + _col0)) (type: double), (-6432.0 + (- (-6432.0 + _col0))) (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) / (- (-6432.0 + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0 + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) + expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0D + _col0) (type: double), _col1 (type: double), (- (-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) + (-6432.0D + _col0)) (type: double), _col2 (type: double), (- (-6432.0D + _col0)) (type: double), (-6432.0D + (- (-6432.0D + _col0))) (type: double), (- 
(-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) / (- (-6432.0D + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0D + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_1.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_1.q.out index 34b273c594..0d201f6321 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_1.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_1.q.out @@ -129,7 +129,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 / -26.28) (type: double), _col1 (type: double), (-1.389 + _col1) (type: double), (_col1 * (-1.389 + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389 + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175 % (- (_col1 * (-1.389 + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) + expressions: _col0 (type: double), (_col0 / -26.28D) (type: double), _col1 (type: double), (-1.389D + _col1) (type: double), (_col1 * (-1.389D + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389D + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175D % (- (_col1 * (-1.389D + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_10.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_10.q.out index 25ff960aea..c21e77fbb7 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_10.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_10.q.out @@ -72,10 +72,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringGroupColLessEqualStringScalar(col 7:string, val 10), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterDecimalScalarGreaterEqualDecimalColumn(val -5638.15, col 14:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(6,2))), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 6981.0), FilterExprOrExpr(children: FilterDecimalColEqualDecimalScalar(col 15:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(11,4)), FilterStringColLikeStringScalar(col 6:string, pattern %a)))) - predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or 
(cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) + predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0D) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) Statistics: Num rows: 9557 Data size: 114684 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639 - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) + expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0D) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639D - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_11.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_11.q.out index e2d8465709..aa8fc0f2db 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_11.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_11.q.out @@ -57,7 +57,7 @@ STAGE PLANS: predicate: ((cstring2 = cstring1) or (ctimestamp1 is null and (cstring1 like '%a'))) (type: boolean) Statistics: Num rows: 9216 Data size: 110592 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0) (type: double), (cdouble * -5638.15) (type: double) + expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639D) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0D) (type: double), (cdouble * -5638.15D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_12.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_12.q.out index 92cff3c2a3..c9973c2e99 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_12.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_12.q.out @@ -157,7 +157,7 @@ 
STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1877 Data size: 22524 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0 * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0 * _col0) / -6432.0) (type: double), (- ((-6432.0 * _col0) / -6432.0)) (type: double), _col6 (type: double), (- (-6432.0 * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0 * _col0)) (type: double), (- (- ((-6432.0 * _col0) / -6432.0))) (type: double), (((-6432.0 * _col0) / -6432.0) + (- (-6432.0 * _col0))) (type: double), _col8 (type: double) + expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0D * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0D * _col0) / -6432.0D) (type: double), (- ((-6432.0D * _col0) / -6432.0D)) (type: double), _col6 (type: double), (- (-6432.0D * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0D * _col0)) (type: double), (- (- ((-6432.0D * _col0) / -6432.0D))) (type: double), (((-6432.0D * _col0) / -6432.0D) + (- (-6432.0D * _col0))) (type: double), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19 Statistics: Num rows: 1877 Data size: 22524 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_13.q.out index 2251b11c8f..15848dade1 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_13.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_13.q.out @@ -93,7 +93,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 11.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 12.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > 11.0D) and (UDFToDouble(ctimestamp2) <> 12.0D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 2730 Data size: 32760 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), 
cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -159,7 +159,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -416,7 +416,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -1.388)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val -1.3359999999999999)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > -1.388D) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 2730 Data size: 32760 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -474,7 +474,7 @@ STAGE PLANS: outputColumnNames: 
_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_14.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_14.q.out index f5b2896102..752b45e0f1 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_14.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_14.q.out @@ -93,10 +93,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColLessEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterTimestampColLessTimestampColumn(col 9:timestamp, col 8:timestamp)), FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -257), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float))) - predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) + predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257L) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) Statistics: Num rows: 606 Data size: 7272 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: 
timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double) + expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Select Vectorization: className: VectorSelectOperator @@ -160,7 +160,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 303 Data size: 3636 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28 + _col2) (type: double), (- (-26.28 + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28 + _col2)) / 10.175) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28 + _col2)) / 10.175)) (type: double), (-1.389 % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) + expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175D) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Statistics: Num rows: 303 Data size: 3636 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_15.q.out index 97cc8de66b..73db3e779e 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_15.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_15.q.out @@ -89,7 +89,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %ss%), FilterStringColLikeStringScalar(col 6:string, pattern 10%), FilterExprAndExpr(children: FilterLongColGreaterEqualLongScalar(col 2:int, val -75), FilterLongColEqualLongColumn(col 0:smallint, col 1:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -3728.0))) - predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) + predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0D)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column 
stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -154,7 +154,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 6144 Data size: 73728 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) + expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 6144 Data size: 73728 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_16.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_16.q.out index 94fd08c6a8..cb26fca0aa 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_16.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_16.q.out @@ -66,7 +66,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 49152 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -132,7 +132,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * 
UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_17.q.out index a5ef4ae3f8..96fa66e330 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_17.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_17.q.out @@ -74,10 +74,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -23), FilterExprOrExpr(children: FilterDoubleColNotEqualDoubleScalar(col 5:double, val 988888.0), FilterDecimalColGreaterDecimalScalar(col 13:decimal(13,3), val -863.257)(children: CastLongToDecimal(col 2:int) -> 13:decimal(13,3))), FilterExprOrExpr(children: FilterLongColGreaterEqualLongScalar(col 0:tinyint, val 33), FilterLongColGreaterEqualLongColumn(col 1:bigint, col 3:bigint)(children: col 1:smallint), FilterDoubleColEqualDoubleColumn(col 4:double, col 5:double)(children: col 4:float))) - predicate: (((cdouble <> 988888.0) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23)) (type: boolean) + predicate: (((cdouble <> 988888.0D) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33Y) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23L)) (type: boolean) Statistics: Num rows: 4096 Data size: 49152 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58 + (- (- cdouble))) (type: double) + expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58D + (- (- cdouble))) (type: double) outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_2.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_2.q.out index f8802e4453..206d1136e4 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_2.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_2.q.out @@ -72,7 +72,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessTimestampColumn(col 8:timestamp, col 9:timestamp), FilterStringColLikeStringScalar(col 7:string, pattern b%), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -5638.14990234375)), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterDoubleScalarNotEqualDoubleColumn(val -10669.0, col 13:double)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterLongScalarGreaterLongColumn(val 359, col 2:int)))) - predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0 <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) + predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0D <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) Statistics: Num rows: 4778 Data size: 57336 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cbigint (type: bigint), cfloat (type: float), cdouble (type: double) @@ -133,7 +133,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 % -563.0) (type: double), (_col0 + 762.0) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) + expressions: _col0 (type: double), (_col0 % -563.0D) (type: double), (_col0 + 762.0D) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0D) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_3.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_3.q.out index 91b98b42da..24caec0fc7 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_3.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_3.q.out @@ -77,7 +77,7 @@ STAGE PLANS: 
className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 14:decimal(22,3))(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterDoubleColEqualDoubleScalar(col 13:double, val -29071.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterDecimalScalarLessEqualDecimalColumn(val 79.553, col 15:decimal(8,3))(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(8,3)), FilterTimestampColGreaterTimestampColumn(col 8:timestamp, col 9:timestamp))) - predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0))) (type: boolean) + predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0D))) (type: boolean) Statistics: Num rows: 2503 Data size: 30036 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float) @@ -138,7 +138,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 - 10.175) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175)) (type: double), (- _col1) (type: double), (_col0 % 79.553) (type: double), (- (_col0 * (_col0 - 10.175))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175))) / (_col0 - 10.175)) (type: double), (- (_col0 - 10.175)) (type: double), _col4 (type: double), (-3728.0 - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) + expressions: _col0 (type: double), (_col0 - 10.175D) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175D)) (type: double), (- _col1) (type: double), (_col0 % 79.553D) (type: double), (- (_col0 * (_col0 - 10.175D))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175D))) / (_col0 - 10.175D)) (type: double), (- (_col0 - 10.175D)) (type: double), _col4 (type: double), (-3728.0D - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_4.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_4.q.out index 1d0de537d9..15ad77c3ca 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_4.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_4.q.out @@ -72,7 +72,7 @@ STAGE PLANS: className: VectorFilterOperator native: true 
predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterEqualLongColumn(col 1:int, col 2:int)(children: col 1:smallint), FilterExprAndExpr(children: FilterLongScalarGreaterEqualLongColumn(val -89010, col 0:int)(children: col 0:tinyint), FilterDoubleColGreaterDoubleScalar(col 5:double, val 79.553)), FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -563, col 3:bigint), FilterExprOrExpr(children: FilterLongColNotEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterDoubleScalarGreaterEqualDoubleColumn(val -3728.0, col 5:double)))) - predicate: (((-563 <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0 >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) + predicate: (((-563L <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0D >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553D)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cdouble (type: double) @@ -133,7 +133,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: bigint), (_col0 * -563) (type: bigint), (-3728 + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2)) (type: double), ((-3728 + _col0) - (_col0 * -563)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2))) (type: double) + expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2)) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_5.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_5.q.out index 32d078ba0c..e3d77c338f 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_5.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_5.q.out @@ -143,7 +143,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0 % 
(UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0D % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_6.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_6.q.out index b74f56b9ef..a47754c852 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_6.q.out @@ -66,10 +66,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 10:boolean, val 0), FilterLongColGreaterEqualLongColumn(col 11:boolean, col 10:boolean)), FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:bigint), FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %a), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -257.0))))) - predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0)) (type: boolean) + predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 11605 Data size: 139260 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28 / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) + expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28D / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_7.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_7.q.out index f01e80854e..50617b1960 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_7.q.out +++ 
b/ql/src/test/results/clientpositive/spark/parquet_vectorization_7.q.out @@ -80,7 +80,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -15.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 65532 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) @@ -327,7 +327,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 7.6850000000000005)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 65532 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), 
(cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_8.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_8.q.out index b4f17c465c..d01b32c5da 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_8.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_8.q.out @@ -76,10 +76,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 10.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 16.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0D) and (UDFToDouble(ctimestamp2) <> 16.0D))) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator @@ -310,10 +310,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 12.503)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 11.998)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val 
-6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503D) and (UDFToDouble(ctimestamp2) <> 11.998D))) (type: boolean) Statistics: Num rows: 12288 Data size: 147456 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_9.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_9.q.out index 94fd08c6a8..cb26fca0aa 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_9.q.out @@ -66,7 +66,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 49152 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -132,7 +132,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) 
+ expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 24576 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_div0.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_div0.q.out index 5ba7587b89..76b4e3c1ad 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_div0.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_div0.q.out @@ -25,7 +25,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: (cdouble / 0.0) (type: double) + expressions: (cdouble / 0.0D) (type: double) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -209,7 +209,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val 0), FilterLongColLessLongScalar(col 3:bigint, val 100000000)) - predicate: ((cbigint < 100000000) and (cbigint > 0)) (type: boolean) + predicate: ((cbigint < 100000000L) and (cbigint > 0L)) (type: boolean) Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (cbigint - 988888) (type: bigint), (cdouble / UDFToDouble((cbigint - 988888))) (type: double), (1.2 / CAST( (cbigint - 988888) AS decimal(19,0))) (type: decimal(22,21)) @@ -425,10 +425,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -500.0), FilterDoubleColLessDoubleScalar(col 5:double, val -199.0)) - predicate: ((cdouble < -199.0) and (cdouble >= -500.0)) (type: boolean) + predicate: ((cdouble < -199.0D) and (cdouble >= -500.0D)) (type: boolean) Statistics: Num rows: 1365 Data size: 16380 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 200.0) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0)) (type: double), ((cdouble + 200.0) / (cdouble + 200.0)) (type: double), (3.0 / (cdouble + 200.0)) (type: double), (1.2 / (cdouble + 200.0)) (type: double) + expressions: (cdouble + 200.0D) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0D)) (type: double), ((cdouble + 200.0D) / (cdouble + 200.0D)) (type: double), (3.0D / (cdouble + 200.0D)) (type: double), (1.2D / (cdouble + 200.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col4, _col5 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_limit.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_limit.q.out index ca696ab859..20d7e4e6fd 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_limit.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_limit.q.out @@ -248,7 +248,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 
4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct] Select Operator - expressions: ctinyint (type: tinyint), (cdouble + 1.0) (type: double) + expressions: ctinyint (type: tinyint), (cdouble + 1.0D) (type: double) outputColumnNames: _col0, _col1 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/parquet_vectorization_part_project.q.out b/ql/src/test/results/clientpositive/spark/parquet_vectorization_part_project.q.out index 944101ff0c..8c6beb52e6 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/spark/parquet_vectorization_part_project.q.out @@ -71,7 +71,7 @@ STAGE PLANS: alias: alltypesparquet_part Statistics: Num rows: 200 Data size: 2400 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 2.0) (type: double) + expressions: (cdouble + 2.0D) (type: double) outputColumnNames: _col0 Statistics: Num rows: 200 Data size: 2400 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out index 42ecd51322..a86147fb70 100644 --- a/ql/src/test/results/clientpositive/spark/pcr.q.out +++ b/ql/src/test/results/clientpositive/spark/pcr.q.out @@ -4139,7 +4139,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) = 11.0) (type: boolean) + predicate: (UDFToDouble(key) = 11.0D) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), hr (type: string) @@ -4328,7 +4328,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) = 11.0) (type: boolean) + predicate: (UDFToDouble(key) = 11.0D) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string) diff --git a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out index e0e6b31cf2..cf692edc21 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out @@ -76,7 +76,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean) + predicate: ((key <> '306') and (sqrt(key) <> 13.0D) and value is not null) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) @@ -1756,7 +1756,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean) + predicate: ((key <> '306') and (sqrt(key) <> 13.0D) and value is not null) (type: boolean) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out 
b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out index b017647f69..59936bb636 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out @@ -233,10 +233,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -524,10 +524,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -815,10 +815,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -1106,10 +1106,10 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(_col1) + 1.0) < 5.0) (type: boolean) + predicate: ((UDFToDouble(_col1) + 1.0D) < 5.0D) (type: boolean) Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0) (type: double), (UDFToDouble(_col1) + 3.0) (type: double) + expressions: _col0 (type: string), (UDFToDouble(_col1) + 2.0D) (type: double), (UDFToDouble(_col1) + 3.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out index 4000f893e3..067f70c3a0 100644 --- 
a/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out @@ -33,7 +33,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -51,7 +51,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -148,7 +148,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -166,7 +166,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out index f7f911c35f..058029f969 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out @@ -39,7 +39,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -57,7 +57,7 @@ STAGE PLANS: alias: c Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -74,7 +74,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 
Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -413,7 +413,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -431,7 +431,7 @@ STAGE PLANS: alias: c Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -448,7 +448,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean) + predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out index f666940f33..322bae0616 100644 --- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out @@ -35,7 +35,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -109,7 +109,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -341,7 +341,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -466,7 +466,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -651,7 +651,7 @@ STAGE PLANS: GatherStats: false Filter Operator 
isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -725,7 +725,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -957,7 +957,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -1082,7 +1082,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 20.0D) and (UDFToDouble(key) > 15.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out index 79766b0c94..9ebb1c5fae 100644 --- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out @@ -1024,10 +1024,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -1093,10 +1093,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -1241,10 +1241,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter 
Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -1359,10 +1359,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1422,10 +1422,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1529,10 +1529,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -1884,17 +1884,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToDouble(UDFToInteger((_col0 / 
2.0))) (type: double) + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -1934,19 +1934,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: @@ -1955,7 +1955,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -2024,10 +2024,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2044,7 +2044,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Spark Partition Pruning Sink Operator Target column: [1:hr (string)] - partition key expr: [(UDFToDouble(hr) * 2.0)] + partition key expr: [(UDFToDouble(hr) * 2.0D)] Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE target works: [Map 1] @@ -2066,18 +2066,18 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: (UDFToDouble(_col0) * 2.0) (type: double) + key expressions: (UDFToDouble(_col0) * 2.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data 
size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2094,7 +2094,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -2181,19 +2181,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: @@ -2202,7 +2202,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -2280,18 +2280,18 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: (UDFToDouble(_col0) * 2.0) (type: double) + key expressions: (UDFToDouble(_col0) * 2.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2308,7 +2308,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Statistics: Num rows: 2200 Data size: 23372 
Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -2387,10 +2387,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2407,7 +2407,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Spark Partition Pruning Sink Operator Target column: [1:hr (string)] - partition key expr: [UDFToString((UDFToDouble(hr) * 2.0))] + partition key expr: [UDFToString((UDFToDouble(hr) * 2.0D))] Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE target works: [Map 1] @@ -2429,18 +2429,18 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + key expressions: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) sort order: + - Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -2457,7 +2457,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + 0 UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) 1 UDFToString(_col0) (type: string) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -2673,10 +2673,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D)) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08')) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08')) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -2769,10 +2769,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) 
= 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -2832,10 +2832,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -3312,10 +3312,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -3381,10 +3381,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -3491,10 +3491,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 13.0) (type: boolean) + predicate: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -3509,10 +3509,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - 
predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -4729,10 +4729,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -4875,10 +4875,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -5145,10 +5145,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -5157,9 +5157,9 @@ STAGE PLANS: Spark HashTable Sink Operator keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Select Operator - expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -5196,7 +5196,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) input vertices: 1 Map 3 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE @@ -5269,10 +5269,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: 
((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -5280,7 +5280,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Spark HashTable Sink Operator keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Select Operator expressions: _col0 (type: double) @@ -5293,7 +5293,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Spark Partition Pruning Sink Operator Target column: [1:hr (string)] - partition key expr: [(UDFToDouble(hr) * 2.0)] + partition key expr: [(UDFToDouble(hr) * 2.0D)] Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE target works: [Map 1] Local Work: @@ -5319,7 +5319,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) input vertices: 1 Map 3 @@ -5851,10 +5851,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -5987,10 +5987,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -6011,10 +6011,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 13.0) (type: boolean) + predicate: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -6436,10 +6436,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and 
(UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 54 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out index 0f7ad17f75..cd566bbb1b 100644 --- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out @@ -94,7 +94,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((11.0 = 11.0) and ds is not null) (type: boolean) + filterExpr: ((11.0D = 11.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) @@ -110,10 +110,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart2 - filterExpr: (UDFToDouble(hr) = 11.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 2000 Data size: 49248 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 11.0) (type: boolean) + predicate: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 1000 Data size: 24624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) @@ -194,10 +194,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart2 - filterExpr: (UDFToDouble(hr) = 11.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 2000 Data size: 49248 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 11.0) (type: boolean) + predicate: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 1000 Data size: 24624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) @@ -281,10 +281,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart2 - filterExpr: (UDFToDouble(hr) = 11.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 2000 Data size: 49248 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 11.0) (type: boolean) + predicate: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 1000 Data size: 24624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) @@ -419,10 +419,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart2 - filterExpr: (UDFToDouble(hr) = 11.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 2000 Data size: 49248 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(hr) = 11.0) (type: boolean) + predicate: (UDFToDouble(hr) = 11.0D) (type: boolean) Statistics: Num rows: 1000 Data size: 24624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds 
(type: string), hr (type: string) diff --git a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out index ebd8d8692c..1e5d4569cf 100644 --- a/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_explainuser_1.q.out @@ -445,7 +445,7 @@ Stage-0 Select Operator [SEL_26] (rows=1 width=20) Output:["_col1","_col4"] Filter Operator [FIL_23] (rows=1 width=20) - predicate:(((UDFToLong(_col1) + _col4) >= 0) and ((_col1 >= 1) or (_col4 >= 1)) and ((_col3 + _col6) >= 0)) + predicate:(((UDFToLong(_col1) + _col4) >= 0) and ((_col1 >= 1) or (_col4 >= 1L)) and ((_col3 + _col6) >= 0)) Join Operator [JOIN_22] (rows=3 width=18) Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"} <-Map 1 [PARTITION-LEVEL SORT] @@ -596,7 +596,7 @@ Stage-0 Select Operator [SEL_26] (rows=1 width=20) Output:["_col1","_col4"] Filter Operator [FIL_23] (rows=1 width=20) - predicate:(((UDFToLong(_col1) + _col4) >= 0) and ((_col1 >= 1) or (_col4 >= 1)) and ((_col3 + _col6) >= 0)) + predicate:(((UDFToLong(_col1) + _col4) >= 0) and ((_col1 >= 1) or (_col4 >= 1L)) and ((_col3 + _col6) >= 0)) Join Operator [JOIN_22] (rows=3 width=18) Output:["_col1","_col3","_col4","_col6"],condition map:[{"":"{\"type\":\"Inner\",\"left\":0,\"right\":1}"},{"":"{\"type\":\"Inner\",\"left\":1,\"right\":2}"}],keys:{"0":"_col0","1":"_col0","2":"_col0"} <-Map 1 [PARTITION-LEVEL SORT] @@ -903,7 +903,7 @@ Stage-0 Select Operator [SEL_2] (rows=6 width=85) Output:["_col0"] Filter Operator [FIL_11] (rows=6 width=85) - predicate:(UDFToDouble(key) >= 1.0) + predicate:(UDFToDouble(key) >= 1.0D) TableScan [TS_0] (rows=20 width=80) default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Map 3 [PARTITION-LEVEL SORT] @@ -912,7 +912,7 @@ Stage-0 Select Operator [SEL_5] (rows=6 width=85) Output:["_col0"] Filter Operator [FIL_12] (rows=6 width=85) - predicate:(UDFToDouble(key) >= 1.0) + predicate:(UDFToDouble(key) >= 1.0D) TableScan [TS_3] (rows=20 width=80) default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] @@ -1545,7 +1545,7 @@ Stage-0 Select Operator [SEL_19] (rows=6 width=85) Output:["_col0"] Filter Operator [FIL_38] (rows=6 width=85) - predicate:(UDFToDouble(key) > 0.0) + predicate:(UDFToDouble(key) > 0.0D) TableScan [TS_17] (rows=20 width=80) default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Reducer 2 [PARTITION-LEVEL SORT] @@ -1554,7 +1554,7 @@ Stage-0 Select Operator [SEL_8] (rows=1 width=93) Output:["_col0","_col1"] Filter Operator [FIL_7] (rows=1 width=101) - predicate:(((UDFToDouble(_col2) + UDFToDouble(_col3)) >= 0.0) and ((UDFToDouble(_col2) >= 1.0) or (_col3 >= 1))) + predicate:(((UDFToDouble(_col2) + UDFToDouble(_col3)) >= 0.0D) and ((UDFToDouble(_col2) >= 1.0D) or (_col3 >= 1L))) Select Operator [SEL_6] (rows=1 width=101) Output:["_col2","_col3"] Group By Operator [GBY_5] (rows=1 width=101) @@ -1565,7 +1565,7 @@ Stage-0 Group By Operator [GBY_3] (rows=1 width=101) Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float Filter Operator [FIL_36] (rows=1 width=93) - predicate:((((c_int + 1) + 1) >= 0) and (((c_int + 1) > 0) or (UDFToDouble(key) >= 0.0)) and ((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and 
(UDFToDouble(key) > 0.0) and (c_float > 0)) + predicate:((((c_int + 1) + 1) >= 0) and (((c_int + 1) > 0) or (UDFToDouble(key) >= 0.0D)) and ((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0D) and (c_float > 0)) TableScan [TS_0] (rows=20 width=88) default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] <-Reducer 7 [PARTITION-LEVEL SORT] @@ -1583,7 +1583,7 @@ Stage-0 Group By Operator [GBY_12] (rows=1 width=93) Output:["_col0","_col1","_col2"],keys:key, c_int, c_float Filter Operator [FIL_37] (rows=1 width=93) - predicate:(((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0) and (c_float > 0)) + predicate:(((UDFToFloat(c_int) + c_float) >= 0) and ((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0)) and ((c_int >= 1) or (c_float >= 1)) and (UDFToDouble(key) > 0.0D) and (c_float > 0)) TableScan [TS_9] (rows=20 width=88) default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"] @@ -2201,7 +2201,7 @@ Stage-0 Select Operator [SEL_23] (rows=500 width=178) Output:["_col0","_col1"] Filter Operator [FIL_22] (rows=500 width=195) - predicate:((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) + predicate:((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) Join Operator [JOIN_21] (rows=500 width=195) Output:["_col0","_col1","_col2","_col3","_col5"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0","1":"_col0"} <-Reducer 2 [PARTITION-LEVEL SORT] @@ -2277,7 +2277,7 @@ Stage-0 Select Operator [SEL_25] (rows=13 width=223) Output:["_col0","_col1","_col2"] Filter Operator [FIL_24] (rows=13 width=231) - predicate:(not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) + predicate:(not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) Join Operator [JOIN_23] (rows=26 width=230) Output:["_col0","_col1","_col2","_col4","_col5","_col8"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0, _col1","1":"_col0, _col1"} <-Reducer 2 [PARTITION-LEVEL SORT] @@ -2367,7 +2367,7 @@ Stage-0 Select Operator [SEL_28] (rows=26 width=125) Output:["_col0","_col1"] Filter Operator [FIL_27] (rows=26 width=141) - predicate:((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) + predicate:((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) Join Operator [JOIN_26] (rows=26 width=141) Output:["_col0","_col1","_col2","_col3","_col5"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"UDFToDouble(_col1)","1":"_col0"} <-Reducer 2 [PARTITION-LEVEL SORT] @@ -2448,7 +2448,7 @@ Stage-0 Select Operator [SEL_34] (rows=3 width=106) Output:["_col0","_col1"] Filter Operator [FIL_33] (rows=3 width=119) - predicate:(not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) + predicate:(not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN 
(_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) Join Operator [JOIN_32] (rows=5 width=114) Output:["_col0","_col1","_col3","_col4","_col7"],condition map:[{"":"{\"type\":\"Left Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"_col0, _col1","1":"_col0, _col1"} <-Reducer 10 [PARTITION-LEVEL SORT] @@ -2461,7 +2461,7 @@ Stage-0 Select Operator [SEL_24] (rows=1 width=110) Output:["_col0","_col1"] Filter Operator [FIL_23] (rows=1 width=114) - predicate:(((_col2 - _col1) > 600.0) and _col1 is not null) + predicate:(((_col2 - _col1) > 600.0D) and _col1 is not null) Group By Operator [GBY_22] (rows=5 width=114) Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)"],keys:KEY._col0 <-Map 6 [GROUP] @@ -2505,7 +2505,7 @@ Stage-0 Select Operator [SEL_12] (rows=1 width=114) Output:["_col0","_col1"] Filter Operator [FIL_11] (rows=1 width=114) - predicate:(((_col2 - _col1) > 600.0) and _col1 is not null) + predicate:(((_col2 - _col1) > 600.0D) and _col1 is not null) Group By Operator [GBY_10] (rows=5 width=114) Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)"],keys:KEY._col0 <- Please refer to the previous Map 6 [GROUP] @@ -5447,7 +5447,7 @@ Stage-0 Select Operator [SEL_9] (rows=550 width=87) Output:["_col0","_col1","_col2"] Map Join Operator [MAPJOIN_19] (rows=550 width=87) - Conds:SEL_5.UDFToDouble(_col0)=SEL_5.(UDFToDouble(_col0) + 1.0)(Inner),Output:["_col0","_col1","_col2"] + Conds:SEL_5.UDFToDouble(_col0)=SEL_5.(UDFToDouble(_col0) + 1.0D)(Inner),Output:["_col0","_col1","_col2"] <-Select Operator [SEL_5] (rows=500 width=87) Output:["_col0"] Filter Operator [FIL_18] (rows=500 width=87) @@ -5458,7 +5458,7 @@ Stage-0 Stage-2 Map 1 keys: [HASHTABLESINK_21] - 0UDFToDouble(_col0),1(UDFToDouble(_col0) + 1.0) + 0UDFToDouble(_col0),1(UDFToDouble(_col0) + 1.0D) Select Operator [SEL_2] (rows=1 width=368) Output:["_col0","_col1"] Filter Operator [FIL_17] (rows=1 width=368) @@ -5711,7 +5711,7 @@ Stage-0 Select Operator [SEL_7] (rows=1 width=404) Output:["_col0","_col1","_col2"] Map Join Operator [MAPJOIN_15] (rows=1 width=404) - Conds:SEL_1.UDFToDouble(_col0)=SEL_1.(UDFToDouble(_col0) + 1.0)(Left Outer),Output:["_col0","_col1","_col2"] + Conds:SEL_1.UDFToDouble(_col0)=SEL_1.(UDFToDouble(_col0) + 1.0D)(Left Outer),Output:["_col0","_col1","_col2"] <-Select Operator [SEL_1] (rows=1 width=368) Output:["_col0","_col1"] TableScan [TS_0] (rows=1 width=368) @@ -5720,7 +5720,7 @@ Stage-0 Stage-2 Map 4 keys: [HASHTABLESINK_17] - 0UDFToDouble(_col0),1(UDFToDouble(_col0) + 1.0) + 0UDFToDouble(_col0),1(UDFToDouble(_col0) + 1.0D) Select Operator [SEL_3] (rows=1 width=184) Output:["_col0"] TableScan [TS_2] (rows=1 width=184) @@ -5755,7 +5755,7 @@ Stage-0 Select Operator [SEL_7] (rows=1 width=404) Output:["_col0","_col1","_col2"] Map Join Operator [MAPJOIN_15] (rows=1 width=404) - Conds:SEL_3.UDFToDouble(_col0)=SEL_3.(UDFToDouble(_col0) + 1.0)(Right Outer),Output:["_col0","_col1","_col2"] + Conds:SEL_3.UDFToDouble(_col0)=SEL_3.(UDFToDouble(_col0) + 1.0D)(Right Outer),Output:["_col0","_col1","_col2"] <-Select Operator [SEL_3] (rows=1 width=184) Output:["_col0"] TableScan [TS_2] (rows=1 width=184) @@ -5764,7 +5764,7 @@ Stage-0 Stage-2 Map 1 keys: [HASHTABLESINK_17] - 0UDFToDouble(_col0),1(UDFToDouble(_col0) + 1.0) + 0UDFToDouble(_col0),1(UDFToDouble(_col0) + 1.0D) Select Operator [SEL_1] (rows=1 width=368) Output:["_col0","_col1"] TableScan [TS_0] (rows=1 width=368) @@ -5800,7 +5800,7 @@ Stage-0 
Select Operator [SEL_7] (rows=1 width=404) Output:["_col0","_col1","_col2"] Join Operator [JOIN_6] (rows=1 width=404) - Output:["_col0","_col1","_col2"],condition map:[{"":"{\"type\":\"Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"UDFToDouble(_col0)","1":"(UDFToDouble(_col0) + 1.0)"} + Output:["_col0","_col1","_col2"],condition map:[{"":"{\"type\":\"Outer\",\"left\":0,\"right\":1}"}],keys:{"0":"UDFToDouble(_col0)","1":"(UDFToDouble(_col0) + 1.0D)"} <-Map 1 [PARTITION-LEVEL SORT] PARTITION-LEVEL SORT [RS_4] PartitionCols:UDFToDouble(_col0) @@ -5810,7 +5810,7 @@ Stage-0 default@t1,a,Tbl:COMPLETE,Col:NONE,Output:["key","val"] <-Map 5 [PARTITION-LEVEL SORT] PARTITION-LEVEL SORT [RS_5] - PartitionCols:(UDFToDouble(_col0) + 1.0) + PartitionCols:(UDFToDouble(_col0) + 1.0D) Select Operator [SEL_3] (rows=1 width=184) Output:["_col0"] TableScan [TS_2] (rows=1 width=184) @@ -5844,7 +5844,7 @@ Stage-0 Select Operator [SEL_7] (rows=1 width=202) Output:["_col0","_col1"] Map Join Operator [MAPJOIN_15] (rows=1 width=202) - Conds:SEL_1.(UDFToDouble(_col0) + 1.0)=SEL_1.UDFToDouble(_col0)(Left Outer),Output:["_col0","_col2"] + Conds:SEL_1.(UDFToDouble(_col0) + 1.0D)=SEL_1.UDFToDouble(_col0)(Left Outer),Output:["_col0","_col2"] <-Select Operator [SEL_1] (rows=1 width=184) Output:["_col0"] TableScan [TS_0] (rows=1 width=184) @@ -5853,7 +5853,7 @@ Stage-0 Stage-2 Map 4 keys: [HASHTABLESINK_17] - 0(UDFToDouble(_col0) + 1.0),1UDFToDouble(_col0) + 0(UDFToDouble(_col0) + 1.0D),1UDFToDouble(_col0) Select Operator [SEL_3] (rows=1 width=368) Output:["_col0","_col1"] TableScan [TS_2] (rows=1 width=368) diff --git a/ql/src/test/results/clientpositive/spark/spark_union_merge.q.out b/ql/src/test/results/clientpositive/spark/spark_union_merge.q.out index f6afd8be05..16f1277636 100644 --- a/ql/src/test/results/clientpositive/spark/spark_union_merge.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_union_merge.q.out @@ -31,7 +31,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -115,7 +115,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -257,7 +257,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -341,7 +341,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/spark_use_op_stats.q.out b/ql/src/test/results/clientpositive/spark/spark_use_op_stats.q.out index cd7c6ddb19..4a5c873174 100644 --- 
a/ql/src/test/results/clientpositive/spark/spark_use_op_stats.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_use_op_stats.q.out @@ -25,7 +25,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -42,7 +42,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -156,7 +156,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 150.0) (type: boolean) + predicate: (UDFToDouble(key) > 150.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -174,7 +174,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 150.0) (type: boolean) + predicate: (UDFToDouble(key) > 150.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -191,7 +191,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 148 Data size: 1542 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 150.0) (type: boolean) + predicate: (UDFToDouble(key) > 150.0D) (type: boolean) Statistics: Num rows: 49 Data size: 510 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -208,7 +208,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 148 Data size: 1542 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 150.0) (type: boolean) + predicate: (UDFToDouble(key) > 150.0D) (type: boolean) Statistics: Num rows: 49 Data size: 510 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out index a1bc1a9379..6bf28f6896 100644 --- a/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_use_ts_stats_for_mapjoin.q.out @@ -25,7 +25,7 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -42,7 +42,7 @@ STAGE PLANS: alias: src2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 
Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out index d359c36591..cdfd3e36a9 100644 --- a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out @@ -1878,7 +1878,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -1888,7 +1888,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:string)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -2048,7 +2048,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -2058,7 +2058,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:string)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -2330,7 +2330,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -2340,7 +2340,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:string)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -2519,7 +2519,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and 
(UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -2529,7 +2529,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 0:string), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -2661,7 +2661,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -2671,7 +2671,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 0:string), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -2864,7 +2864,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -2874,7 +2874,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 0:string), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -3515,7 +3515,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: 
((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -3525,7 +3525,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -3536,7 +3536,7 @@ STAGE PLANS: projectedOutputColumnNums: [0] Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -3635,7 +3635,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -3645,7 +3645,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -3656,9 +3656,9 @@ STAGE PLANS: projectedOutputColumnNums: [0] Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Reduce Sink Vectorization: className: VectorReduceSinkMultiKeyOperator keyColumnNums: [3] @@ -3695,7 +3695,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -3792,7 +3792,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -3802,7 +3802,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: 
FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -3834,7 +3834,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Spark Partition Pruning Sink Operator Target column: [1:hr (string)] - partition key expr: [(UDFToDouble(hr) * 2.0)] + partition key expr: [(UDFToDouble(hr) * 2.0D)] Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE target works: [Map 1] Execution mode: vectorized @@ -3879,9 +3879,9 @@ STAGE PLANS: projectedOutputColumnNums: [3] Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: (UDFToDouble(_col0) * 2.0) (type: double) + key expressions: (UDFToDouble(_col0) * 2.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double) Reduce Sink Vectorization: className: VectorReduceSinkMultiKeyOperator keyColumnNums: [6] @@ -3911,7 +3911,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -3921,7 +3921,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -3969,7 +3969,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -4115,7 +4115,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -4125,7 +4125,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr 
(type: double) @@ -4136,9 +4136,9 @@ STAGE PLANS: projectedOutputColumnNums: [0] Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) sort order: + - Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Reduce Sink Vectorization: className: VectorReduceSinkMultiKeyOperator keyColumnNums: [3] @@ -4175,7 +4175,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -4288,9 +4288,9 @@ STAGE PLANS: projectedOutputColumnNums: [3] Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: (UDFToDouble(_col0) * 2.0) (type: double) + key expressions: (UDFToDouble(_col0) * 2.0D) (type: double) sort order: + - Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double) + Map-reduce partition columns: (UDFToDouble(_col0) * 2.0D) (type: double) Reduce Sink Vectorization: className: VectorReduceSinkMultiKeyOperator keyColumnNums: [6] @@ -4320,7 +4320,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -4330,7 +4330,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -4378,7 +4378,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -4485,7 +4485,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -4495,7 +4495,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 
Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -4528,7 +4528,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Spark Partition Pruning Sink Operator Target column: [1:hr (string)] - partition key expr: [UDFToString((UDFToDouble(hr) * 2.0))] + partition key expr: [UDFToString((UDFToDouble(hr) * 2.0D))] Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE target works: [Map 1] Execution mode: vectorized @@ -4573,9 +4573,9 @@ STAGE PLANS: projectedOutputColumnNums: [3] Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + key expressions: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) sort order: + - Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) Reduce Sink Vectorization: className: VectorReduceSinkStringOperator keyColumnNums: [7] @@ -4605,7 +4605,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -4615,7 +4615,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -4664,7 +4664,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string) + 0 UDFToString((UDFToDouble(_col0) * 2.0D)) (type: string) 1 UDFToString(_col0) (type: string) Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -5069,7 +5069,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D)) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -5079,7 +5079,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08')) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08')) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -5231,7 +5231,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 
11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -5241,7 +5241,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 0:string), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -5373,7 +5373,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -5383,7 +5383,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 0:string), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -6359,7 +6359,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -6369,7 +6369,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 0:string) -> 3:double)) - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -6529,7 +6529,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: 
((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -6539,7 +6539,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 0:string) -> 3:double)) - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -6710,7 +6710,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -6720,7 +6720,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterDoubleColEqualDoubleScalar(col 3:double, val 13.0)(children: CastStringToDouble(col 0:string) -> 3:double) - predicate: (UDFToDouble(hr) = 13.0) (type: boolean) + predicate: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -6761,7 +6761,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE TableScan Vectorization: native: true @@ -6771,7 +6771,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 5:double, val 13.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -9473,7 +9473,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -9483,7 +9483,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:string)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -9738,7 +9738,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_date_hour - 
filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean) + filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0D) and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -9748,7 +9748,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringGroupColEqualStringScalar(col 1:string, val 2008-04-08), FilterDoubleColEqualDoubleScalar(col 5:double, val 11.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 0:string), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hour) = 11.0) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (date = '2008-04-08') and ds is not null and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -10241,7 +10241,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -10251,7 +10251,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -10267,9 +10267,9 @@ STAGE PLANS: native: true keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Select Operator - expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + expressions: UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -10341,7 +10341,7 @@ STAGE PLANS: Inner Join 0 to 1 keys: 0 UDFToDouble(_col0) (type: double) - 1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double) + 1 UDFToDouble(UDFToInteger((_col0 / 2.0D))) (type: double) Map Join Vectorization: bigTableKeyColumnNums: [5] bigTableKeyExpressions: CastStringToDouble(col 3:string) -> 5:double @@ -10478,7 +10478,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_double_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and hr is not null) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -10488,7 +10488,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), SelectColumnIsNotNull(col 0:double)) - predicate: ((UDFToDouble(hour) = 11.0) 
and hr is not null) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and hr is not null) (type: boolean) Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: double) @@ -10503,7 +10503,7 @@ STAGE PLANS: className: VectorSparkHashTableSinkOperator native: true keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Select Operator expressions: _col0 (type: double) @@ -10527,7 +10527,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE Spark Partition Pruning Sink Operator Target column: [1:hr (string)] - partition key expr: [(UDFToDouble(hr) * 2.0)] + partition key expr: [(UDFToDouble(hr) * 2.0D)] Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE target works: [Map 1] Execution mode: vectorized @@ -10576,7 +10576,7 @@ STAGE PLANS: condition map: Inner Join 0 to 1 keys: - 0 (UDFToDouble(_col0) * 2.0) (type: double) + 0 (UDFToDouble(_col0) * 2.0D) (type: double) 1 _col0 (type: double) Map Join Vectorization: bigTableKeyColumnNums: [6] @@ -11658,7 +11658,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + filterExpr: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -11668,7 +11668,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 1:string) -> 3:double), FilterDoubleColEqualDoubleScalar(col 3:double, val 11.0)(children: CastStringToDouble(col 0:string) -> 3:double)) - predicate: ((UDFToDouble(hour) = 11.0) and (UDFToDouble(hr) = 11.0)) (type: boolean) + predicate: ((UDFToDouble(hour) = 11.0D) and (UDFToDouble(hr) = 11.0D)) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) @@ -11912,7 +11912,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - filterExpr: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + filterExpr: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE TableScan Vectorization: native: true @@ -11922,7 +11922,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColEqualDoubleScalar(col 5:double, val 13.0)(children: CastStringToDouble(col 3:string) -> 5:double), SelectColumnIsNotNull(col 2:string)) - predicate: ((UDFToDouble(hr) = 13.0) and ds is not null) (type: boolean) + predicate: ((UDFToDouble(hr) = 13.0D) and ds is not null) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: ds (type: string), hr (type: string) @@ -12015,7 +12015,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart_hour - filterExpr: (UDFToDouble(hr) = 13.0) (type: boolean) + filterExpr: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true @@ -12025,7 +12025,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: 
FilterDoubleColEqualDoubleScalar(col 3:double, val 13.0)(children: CastStringToDouble(col 0:string) -> 3:double) - predicate: (UDFToDouble(hr) = 13.0) (type: boolean) + predicate: (UDFToDouble(hr) = 13.0D) (type: boolean) Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: hr (type: string) diff --git a/ql/src/test/results/clientpositive/spark/subquery_in.q.out b/ql/src/test/results/clientpositive/spark/subquery_in.q.out index 5e48a5cb7d..7df930039a 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_in.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_in.q.out @@ -4635,7 +4635,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col16 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col12 = 0)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col12 = 0L)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4873,7 +4873,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col16 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col12 = 0)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col12 = 0L)) THEN (false) WHEN (_col12 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col13 < _col12)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) diff --git a/ql/src/test/results/clientpositive/spark/subquery_multi.q.out b/ql/src/test/results/clientpositive/spark/subquery_multi.q.out index 3764d92b3b..32e0983bf4 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_multi.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_multi.q.out @@ -393,7 +393,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 1 Data size: 39416 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 1 Data size: 39416 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), 
_col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -597,7 +597,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 1 Data size: 39416 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 1 Data size: 39416 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -826,7 +826,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col13 Statistics: Num rows: 1 Data size: 39416 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col10 = 0) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) + predicate: ((_col10 = 0L) or (_col13 is null and _col3 is not null and (_col11 >= _col10))) (type: boolean) Statistics: Num rows: 1 Data size: 39416 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -1000,7 +1000,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 1 Data size: 35834 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col1 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col1 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 1 Data size: 35834 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -1823,7 +1823,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 33 Data size: 4187 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 17 Data size: 2156 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2306,7 +2306,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, 
_col8, _col10, _col11, _col14 Statistics: Num rows: 33 Data size: 4187 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 17 Data size: 2156 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2538,7 +2538,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 33 Data size: 4187 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 17 Data size: 2156 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2993,7 +2993,7 @@ STAGE PLANS: outputColumnNames: _col0, _col2, _col4, _col5, _col7 Statistics: Num rows: 27 Data size: 5746 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col4 = 0) or (_col7 is null and _col2 is not null and (_col5 >= _col4))) (type: boolean) + predicate: ((_col4 = 0L) or (_col7 is null and _col2 is not null and (_col5 >= _col4))) (type: boolean) Statistics: Num rows: 17 Data size: 3617 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), 1 (type: int) @@ -3836,7 +3836,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col0 = 3) or CASE WHEN ((_col9 = 0)) THEN (true) WHEN (_col12 is not null) THEN (false) WHEN (_col5 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (true) END) (type: boolean) + predicate: ((_col0 = 3) or CASE WHEN ((_col9 = 0L)) THEN (true) WHEN (_col12 is not null) THEN (false) WHEN (_col5 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (true) END) (type: boolean) Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4043,7 +4043,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col4, _col5 Statistics: Num rows: 550 Data size: 13543 Basic stats: COMPLETE Column 
stats: NONE Filter Operator - predicate: ((_col4 is not null and (_col2 <> 0)) or _col1 is not null or _col5 is not null) (type: boolean) + predicate: ((_col4 is not null and (_col2 <> 0L)) or _col1 is not null or _col5 is not null) (type: boolean) Statistics: Num rows: 550 Data size: 13543 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 550 Data size: 13543 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out index 5b4aa49900..1244c99fee 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out @@ -233,12 +233,12 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = 0) (type: boolean) + predicate: (_col0 = 0L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: 0 (type: bigint) + keys: 0L (type: bigint) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE @@ -576,12 +576,12 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = 0) (type: boolean) + predicate: (_col0 = 0L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: 0 (type: bigint) + keys: 0L (type: bigint) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/spark/subquery_notin.q.out b/ql/src/test/results/clientpositive/spark/subquery_notin.q.out index e2f26a977f..82a1304a93 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_notin.q.out @@ -104,7 +104,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 550 Data size: 15193 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -377,7 +377,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select 
Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int) @@ -645,7 +645,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 28 Data size: 5892 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 18 Data size: 3787 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: int) @@ -1036,7 +1036,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col10 Statistics: Num rows: 9 Data size: 1345 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col6 = 0)) THEN (false) WHEN (_col6 is null) THEN (false) WHEN (_col10 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col7 < _col6)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col6 = 0L)) THEN (false) WHEN (_col6 is null) THEN (false) WHEN (_col10 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col7 < _col6)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 5 Data size: 747 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int) @@ -1513,7 +1513,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 182 Data size: 5043 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 121 Data size: 3352 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -1685,7 +1685,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col5 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -1874,7 +1874,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 18 Data size: 2537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 
(type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2066,7 +2066,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col0 is not null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col0 is not null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 18 Data size: 2537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2324,7 +2324,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col7 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE @@ -2496,7 +2496,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col7 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col7 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 18 Data size: 2537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2762,7 +2762,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col14 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col10 = 0)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN (_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), 
_col8 (type: string) @@ -2960,7 +2960,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col16 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col11 = 0)) THEN (false) WHEN (_col11 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col12 < _col11)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col11 = 0L)) THEN (false) WHEN (_col11 is null) THEN (false) WHEN (_col16 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col12 < _col11)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -3068,7 +3068,7 @@ STAGE PLANS: predicate: p_brand is not null (type: boolean) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: p_brand (type: string), (UDFToDouble(p_type) + 2.0) (type: double) + expressions: p_brand (type: string), (UDFToDouble(p_type) + 2.0D) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -3092,7 +3092,7 @@ STAGE PLANS: predicate: p_brand is not null (type: boolean) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (UDFToDouble(p_type) + 2.0) (type: double), p_brand (type: string) + expressions: (UDFToDouble(p_type) + 2.0D) (type: double), p_brand (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -3132,7 +3132,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col4, _col5, _col8 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -3359,7 +3359,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col4, _col5, _col8 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -3611,7 +3611,7 @@ 
STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 550 Data size: 15193 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -3898,7 +3898,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col7 Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 303 Data size: 3218 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -4134,7 +4134,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 18 Data size: 2537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4344,7 +4344,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col12 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0)) (type: boolean) + predicate: ((_col12 is null and _col5 is not null and (_col10 >= _col9)) or (_col9 = 0L)) (type: boolean) Statistics: Num rows: 18 Data size: 2537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -4579,7 +4579,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 550 Data size: 15193 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -5271,7 +5271,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col7 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE 
WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN ((_col1 + 100) is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE @@ -5492,7 +5492,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 4 Data size: 419 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 2 Data size: 209 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -5717,7 +5717,7 @@ STAGE PLANS: outputColumnNames: _col0, _col3, _col4, _col7 Statistics: Num rows: 4 Data size: 378 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 189 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -5948,7 +5948,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col7 Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -6172,7 +6172,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col6 Statistics: Num rows: 4 Data size: 19 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 9 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -6400,7 +6400,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col6 Statistics: 
Num rows: 6 Data size: 22 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col6 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 3 Data size: 11 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -6588,7 +6588,7 @@ STAGE PLANS: outputColumnNames: _col0, _col3, _col4, _col7 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -6747,7 +6747,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 3 Data size: 67 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col1 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 1 Data size: 22 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -6902,7 +6902,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 3 Data size: 67 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 1 Data size: 22 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -7112,7 +7112,7 @@ STAGE PLANS: residual filter predicates: {(_col1 > _col6)} Statistics: Num rows: 8367 Data size: 186148 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 4184 Data size: 93085 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/spark/subquery_null_agg.q.out b/ql/src/test/results/clientpositive/spark/subquery_null_agg.q.out index 4cfce1033c..96e8ee2f90 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_null_agg.q.out +++ 
b/ql/src/test/results/clientpositive/spark/subquery_null_agg.q.out @@ -134,7 +134,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2 Statistics: Num rows: 1 Data size: 10 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: ((_col1 = 0) or _col2 is null) (type: boolean) + predicate: ((_col1 = 0L) or _col2 is null) (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: null (type: void) diff --git a/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out b/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out index 77359a0197..7488f2e02e 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out @@ -2795,7 +2795,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 = 2) (type: boolean) + predicate: (_col1 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) @@ -2863,7 +2863,7 @@ STAGE PLANS: outputColumnNames: _col1 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col1 = 2) (type: boolean) + predicate: (_col1 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE @@ -3118,7 +3118,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 32560 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) - Map 5 + Map 5 Map Operator Tree: TableScan alias: part_null @@ -3136,7 +3136,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) - Map 7 + Map 7 Map Operator Tree: TableScan alias: part @@ -3154,7 +3154,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Map 9 + Map 9 Map Operator Tree: TableScan alias: part @@ -3173,7 +3173,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reducer 10 + Reducer 10 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) @@ -3190,7 +3190,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: boolean) - Reducer 2 + Reducer 2 Reduce Operator Tree: Join Operator condition map: @@ -3208,26 +3208,26 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 32745 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - sort order: + sort order: Statistics: Num rows: 1 Data size: 32745 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) - Reducer 
3 + Reducer 3 Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 keys: - 0 - 1 + 0 + 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11 Statistics: Num rows: 1 Data size: 32762 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col3 (type: string) - sort order: + - Map-reduce partition columns: _col3 (type: string) + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) Statistics: Num rows: 1 Data size: 32762 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: bigint), _col11 (type: bigint) - Reducer 4 + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: bigint), _col11 (type: bigint) + Reducer 4 Reduce Operator Tree: Join Operator condition map: @@ -3251,7 +3251,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 6 + Reducer 6 Reduce Operator Tree: Group By Operator aggregations: min(VALUE._col0) @@ -3262,7 +3262,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) - Reducer 8 + Reducer 8 Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0), count(VALUE._col1) @@ -4192,17 +4192,17 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE - Group By Operator - keys: _col0 (type: string), _col1 (type: string) - mode: hash - outputColumnNames: _col0, _col1 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Group By Operator + keys: _col0 (type: string), _col1 (type: string) + mode: hash + outputColumnNames: _col0, _col1 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Reducer 6 Reduce Operator Tree: Group By Operator diff --git a/ql/src/test/results/clientpositive/spark/subquery_select.q.out b/ql/src/test/results/clientpositive/spark/subquery_select.q.out index e36c74a91c..6d839facd6 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_select.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_select.q.out @@ -79,7 +79,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < 
_col1)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -252,7 +252,7 @@ STAGE PLANS: outputColumnNames: _col1, _col3, _col4, _col7 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: int), CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col1 (type: int), CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col1 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -433,7 +433,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (true) WHEN (_col4 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (true) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (true) WHEN (_col4 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (true) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -674,7 +674,7 @@ STAGE PLANS: outputColumnNames: _col1, _col5, _col6, _col9 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: int), CASE WHEN ((_col5 = 0)) THEN (true) WHEN (_col5 is null) THEN (true) WHEN (_col9 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col6 < _col5)) THEN (null) ELSE (true) END (type: boolean) + expressions: _col1 (type: int), CASE WHEN ((_col5 = 0L)) THEN (true) WHEN (_col5 is null) THEN (true) WHEN (_col9 is not null) THEN (false) WHEN (_col1 is null) THEN (null) WHEN ((_col6 < _col5)) THEN (null) ELSE (true) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2130,7 +2130,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -3111,7 +3111,7 @@ STAGE PLANS: outputColumnNames: _col0, _col2, _col4, _col5, _col8, _col9, 
_col10, _col12 Statistics: Num rows: 33 Data size: 10556 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: int), (CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (null) ELSE (false) END and CASE WHEN ((_col9 = 0)) THEN (false) WHEN (_col12 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (false) END) (type: boolean) + expressions: _col2 (type: int), (CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col2 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (null) ELSE (false) END and CASE WHEN ((_col9 = 0L)) THEN (false) WHEN (_col12 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col10 < _col9)) THEN (null) ELSE (false) END) (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 33 Data size: 10556 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -3493,7 +3493,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), CASE WHEN ((_col1 = 0)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) + expressions: _col0 (type: int), CASE WHEN ((_col1 = 0L)) THEN (false) WHEN (_col4 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col2 < _col1)) THEN (null) ELSE (false) END (type: boolean) outputColumnNames: _col0, _col1 Statistics: Num rows: 28 Data size: 3947 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -3767,7 +3767,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2, _col3 Statistics: Num rows: 26 Data size: 2470 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CASE WHEN ((_col1 > 409437)) THEN (_col2) ELSE (_col3) END (type: double) + expressions: CASE WHEN ((_col1 > 409437L)) THEN (_col2) ELSE (_col3) END (type: double) outputColumnNames: _col0 Statistics: Num rows: 26 Data size: 2470 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -3953,7 +3953,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: (_col0 - 1) (type: bigint) + expressions: (_col0 - 1L) (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator @@ -4727,7 +4727,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col2 > 0) (type: boolean) + predicate: (_col2 > 0L) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int) @@ -5105,7 +5105,7 @@ STAGE PLANS: outputColumnNames: _col1, _col2 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col2 > 0) (type: boolean) + predicate: (_col2 > 0L) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int) diff --git a/ql/src/test/results/clientpositive/spark/subquery_views.q.out b/ql/src/test/results/clientpositive/spark/subquery_views.q.out index 
14be0186dd..7d611e1d11 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_views.q.out @@ -236,7 +236,7 @@ STAGE PLANS: outputColumnNames: _col0, _col4, _col5, _col8 Statistics: Num rows: 605 Data size: 6426 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: CASE WHEN ((_col4 = 0)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) + predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) Statistics: Num rows: 302 Data size: 3207 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -324,7 +324,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col4, _col5, _col8 Statistics: Num rows: 605 Data size: 6426 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: CASE WHEN ((_col4 = 0)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) + predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean) Statistics: Num rows: 302 Data size: 3207 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/spark/temp_table.q.out b/ql/src/test/results/clientpositive/spark/temp_table.q.out index 6672b1451c..71fa11d9b8 100644 --- a/ql/src/test/results/clientpositive/spark/temp_table.q.out +++ b/ql/src/test/results/clientpositive/spark/temp_table.q.out @@ -19,7 +19,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 2.0) = 0.0) (type: boolean) + predicate: ((UDFToDouble(key) % 2.0D) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -86,7 +86,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 2.0) = 1.0) (type: boolean) + predicate: ((UDFToDouble(key) % 2.0D) = 1.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/udf_example_add.q.out b/ql/src/test/results/clientpositive/spark/udf_example_add.q.out index 587c68b6a4..9e2a474378 100644 --- a/ql/src/test/results/clientpositive/spark/udf_example_add.q.out +++ b/ql/src/test/results/clientpositive/spark/udf_example_add.q.out @@ -35,7 +35,7 @@ STAGE PLANS: TableScan alias: src Select Operator - expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double) + expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003D (type: double), 6.6D (type: double), 11.0D (type: double), 10.4D (type: double) 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Limit Number of rows: 1 diff --git a/ql/src/test/results/clientpositive/spark/union.q.out b/ql/src/test/results/clientpositive/spark/union.q.out index fb1ad65e2f..8feea55cb4 100644 --- a/ql/src/test/results/clientpositive/spark/union.q.out +++ b/ql/src/test/results/clientpositive/spark/union.q.out @@ -29,7 +29,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -48,7 +48,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/union20.q.out b/ql/src/test/results/clientpositive/spark/union20.q.out index bb77c428da..d6934a949a 100644 --- a/ql/src/test/results/clientpositive/spark/union20.q.out +++ b/ql/src/test/results/clientpositive/spark/union20.q.out @@ -55,7 +55,7 @@ STAGE PLANS: alias: s2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -89,7 +89,7 @@ STAGE PLANS: alias: s4 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/union22.q.out b/ql/src/test/results/clientpositive/spark/union22.q.out index 9a8e55308e..5425fa2197 100644 --- a/ql/src/test/results/clientpositive/spark/union22.q.out +++ b/ql/src/test/results/clientpositive/spark/union22.q.out @@ -89,7 +89,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(k0) > 50.0) and (UDFToDouble(k1) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(k0) > 50.0D) and (UDFToDouble(k1) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 1862 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k3 (type: string), k4 (type: string) @@ -172,7 +172,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k0) <= 50.0) (type: boolean) + predicate: (UDFToDouble(k0) <= 50.0D) (type: boolean) Statistics: Num rows: 166 Data size: 5622 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k2 (type: string), k3 (type: string), k4 (type: string) @@ -268,7 +268,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k1) > 20.0) (type: boolean) + predicate: (UDFToDouble(k1) > 20.0D) 
(type: boolean) Statistics: Num rows: 166 Data size: 3693 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k2 (type: string), ds (type: string) diff --git a/ql/src/test/results/clientpositive/spark/union24.q.out b/ql/src/test/results/clientpositive/spark/union24.q.out index a901f49640..145869f7f7 100644 --- a/ql/src/test/results/clientpositive/spark/union24.q.out +++ b/ql/src/test/results/clientpositive/spark/union24.q.out @@ -87,7 +87,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -174,7 +174,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -261,7 +261,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -348,7 +348,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -545,7 +545,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -632,7 +632,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -719,7 +719,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -792,7 +792,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -981,7 +981,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key 
(type: string), count (type: bigint) @@ -1068,7 +1068,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -1155,7 +1155,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -1228,7 +1228,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/spark/union27.q.out b/ql/src/test/results/clientpositive/spark/union27.q.out index 8d5d69ad30..320ac12acb 100644 --- a/ql/src/test/results/clientpositive/spark/union27.q.out +++ b/ql/src/test/results/clientpositive/spark/union27.q.out @@ -55,7 +55,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -72,7 +72,7 @@ STAGE PLANS: alias: dim_pho Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -90,7 +90,7 @@ STAGE PLANS: alias: jackson_sev_add Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/union33.q.out b/ql/src/test/results/clientpositive/spark/union33.q.out index 8e3e2a20f0..a518c6a77c 100644 --- a/ql/src/test/results/clientpositive/spark/union33.q.out +++ b/ql/src/test/results/clientpositive/spark/union33.q.out @@ -43,7 +43,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 0.0) (type: boolean) + predicate: (UDFToDouble(key) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -218,7 +218,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 0.0) (type: boolean) + predicate: (UDFToDouble(key) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out index 9bdcaf656a..0e86bb1a74 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out @@ -70,7 +70,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -108,7 +108,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out index cf89dab3f8..57a9af71b6 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out @@ -86,7 +86,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out index cf89dab3f8..57a9af71b6 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out @@ -86,7 +86,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out index 896e2d879c..5d28f00f23 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out @@ -223,7 +223,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 7.0) (type: boolean) + predicate: (UDFToDouble(key) = 7.0D) (type: boolean) Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -354,7 +354,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0) (type: boolean) + predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0D) (type: boolean) 
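
Similarly, the L suffixes in the union_remove and subquery hunks appear wherever an integral constant sits in a bigint context, most often a literal compared with or combined with count(*), which returns bigint. A hedged sketch under the same assumptions as above, consistent with how the hunks in this patch render such predicates:

    -- count(*) is bigint, so the comparison constant is promoted to
    -- bigint and the new EXPLAIN output prints it with the L suffix,
    -- along the lines of: predicate: (_col1 > 0L) (type: boolean)
    EXPLAIN
    SELECT key, count(*) AS cnt
    FROM src
    GROUP BY key
    HAVING count(*) > 0;
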
Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out index 5fa05a9a0c..5bd51c520c 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out @@ -82,7 +82,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -99,7 +99,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out index 3d311eae36..e76e9db799 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out @@ -87,7 +87,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -104,7 +104,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out index 3034bd1cde..0a67096ee2 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out @@ -272,7 +272,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: (_col0 - 200) (type: bigint) + expressions: (_col0 - 200L) (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -397,7 +397,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (_col1 * 2) (type: bigint) + expressions: _col0 (type: string), (_col1 * 2L) (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out index 
9d651ed9ea..56720f34f7 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out @@ -82,7 +82,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -99,7 +99,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out index ce5e83f8f4..9a0e43006a 100644 --- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out +++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out @@ -31,7 +31,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -51,7 +51,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -71,7 +71,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -363,7 +363,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -383,7 +383,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -403,7 +403,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 
Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -581,7 +581,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -601,7 +601,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -621,7 +621,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -786,7 +786,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 0.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -806,7 +806,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 1.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 1.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -826,7 +826,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) % 3.0) = 2.0) (type: boolean) + predicate: ((UDFToDouble(key) % 3.0D) = 2.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out index 75397d7d7d..9f5fa2a0f7 100644 --- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out @@ -55,7 +55,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColumnInList(col 3:date, values [-67, -171]) - predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean) + predicate: (cdate) IN (DATE'1969-10-26', DATE'1969-07-14') (type: boolean) Statistics: Num rows: 6145 Data size: 1233908 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdate (type: date) @@ -148,7 +148,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: SelectColumnIsFalse(col 5:boolean)(children: LongColumnInList(col 3, values [-67, -171, 20]) -> 5:boolean) - predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean) + predicate: (not 
(cdate) IN (DATE'1969-10-26', DATE'1969-07-14', DATE'1970-01-21')) (type: boolean) Statistics: Num rows: 6144 Data size: 1233707 Basic stats: COMPLETE Column stats: NONE Select Operator Select Vectorization: @@ -455,7 +455,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColumnBetween(col 3:date, left -2, right 1) - predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean) + predicate: cdate BETWEEN DATE'1969-12-30' AND DATE'1970-01-02' (type: boolean) Statistics: Num rows: 1365 Data size: 274090 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdate (type: date) @@ -548,7 +548,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColumnNotBetween(col 3:date, left -610, right 608) - predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean) + predicate: cdate NOT BETWEEN DATE'1968-05-01' AND DATE'1971-09-01' (type: boolean) Statistics: Num rows: 10924 Data size: 2193525 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdate (type: date) @@ -1083,7 +1083,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean) + expressions: (cdate) IN (DATE'1969-10-26', DATE'1969-07-14') (type: boolean) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -1355,13 +1355,13 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean) + expressions: cdate BETWEEN DATE'1969-12-30' AND DATE'1970-01-02' (type: boolean) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator native: true projectedOutputColumnNums: [5] - selectExpressions: VectorUDFAdaptor(cdate BETWEEN 1969-12-30 AND 1970-01-02) -> 5:boolean + selectExpressions: VectorUDFAdaptor(cdate BETWEEN DATE'1969-12-30' AND DATE'1970-01-02') -> 5:boolean Statistics: Num rows: 12289 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out index 9449e68c2b..d81781e881 100644 --- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out @@ -141,7 +141,7 @@ STAGE PLANS: projectedOutputColumnNums: [2] Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: avg(50), avg(50.0), avg(50) + aggregations: avg(50), avg(50.0D), avg(50) Group By Vectorization: aggregators: VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 12:int) -> struct, VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 13:double) -> struct, VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 14:decimal(10,0)) -> struct className: VectorGroupByOperator diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out index bbe9431571..63918729b7 100644 --- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out @@ -156,7 +156,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColGreaterLongScalar(col 9:bigint, val 1) - predicate: (_col9 > 1) (type: boolean) + 
predicate: (_col9 > 1L) (type: boolean) Statistics: Num rows: 2048 Data size: 360813 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14)) @@ -312,7 +312,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 6144 Data size: 1082441 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col15 > 1) (type: boolean) + predicate: (_col15 > 1L) (type: boolean) Statistics: Num rows: 2048 Data size: 360813 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: decimal(24,14)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: decimal(27,18)), _col13 (type: double), _col14 (type: double) @@ -516,7 +516,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColGreaterLongScalar(col 9:bigint, val 1) - predicate: (_col9 > 1) (type: boolean) + predicate: (_col9 > 1L) (type: boolean) Statistics: Num rows: 2048 Data size: 57740 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 (type: decimal(26,0)) @@ -691,7 +691,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 6144 Data size: 173221 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col15 > 1) (type: boolean) + predicate: (_col15 > 1L) (type: boolean) Statistics: Num rows: 2048 Data size: 57740 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 (type: decimal(15,9)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(16,0)), _col10 (type: decimal(16,0)), _col11 (type: decimal(26,0)), _col12 (type: decimal(20,4)), _col13 (type: double), _col14 (type: double) diff --git a/ql/src/test/results/clientpositive/spark/vector_elt.q.out b/ql/src/test/results/clientpositive/spark/vector_elt.q.out index b717fda722..f70863ad75 100644 --- a/ql/src/test/results/clientpositive/spark/vector_elt.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_elt.q.out @@ -31,7 +31,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColGreaterLongScalar(col 0:tinyint, val 0) - predicate: (ctinyint > 0) (type: boolean) + predicate: (ctinyint > 0Y) (type: boolean) Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string) diff --git 
a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out index 911a028210..30dbaf1f28 100644 --- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out @@ -342,7 +342,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0) + 1.0)))), '-'), UDFToString(year(dt))) (type: string) + expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0D) + 1.0D)))), '-'), UDFToString(year(dt))) (type: string) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out index 3b5189fa16..f4d3558fe5 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out @@ -1642,7 +1642,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0 + _col0) (type: double), _col1 (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) + (-6432.0 + _col0)) (type: double), _col2 (type: double), (- (-6432.0 + _col0)) (type: double), (-6432.0 + (- (-6432.0 + _col0))) (type: double), (- (-6432.0 + _col0)) (type: double), ((- (-6432.0 + _col0)) / (- (-6432.0 + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0 + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) + expressions: _col0 (type: double), (- _col0) (type: double), (-6432.0D + _col0) (type: double), _col1 (type: double), (- (-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) + (-6432.0D + _col0)) (type: double), _col2 (type: double), (- (-6432.0D + _col0)) (type: double), (-6432.0D + (- (-6432.0D + _col0))) (type: double), (- (-6432.0D + _col0)) (type: double), ((- (-6432.0D + _col0)) / (- (-6432.0D + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432.0D + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 Statistics: Num rows: 1 Data size: 260 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_1.q.out b/ql/src/test/results/clientpositive/spark/vectorization_1.q.out index 8090d90466..4b4e5d7ce8 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_1.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_1.q.out @@ -129,7 +129,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 / -26.28) (type: double), _col1 (type: double), (-1.389 + _col1) (type: double), (_col1 * (-1.389 + _col1)) (type: double), _col2 (type: tinyint), 
(- (_col1 * (-1.389 + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175 % (- (_col1 * (-1.389 + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) + expressions: _col0 (type: double), (_col0 / -26.28D) (type: double), _col1 (type: double), (-1.389D + _col1) (type: double), (_col1 * (-1.389D + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389D + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175D % (- (_col1 * (-1.389D + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_10.q.out b/ql/src/test/results/clientpositive/spark/vectorization_10.q.out index 207e4b63aa..19c3e1de4a 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_10.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_10.q.out @@ -72,10 +72,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringGroupColLessEqualStringScalar(col 7:string, val 10), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterDecimalScalarGreaterEqualDecimalColumn(val -5638.15, col 14:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(6,2))), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 6981.0), FilterExprOrExpr(children: FilterDecimalColEqualDecimalScalar(col 15:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(11,4)), FilterStringColLikeStringScalar(col 6:string, pattern %a)))) - predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) + predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0D) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean) Statistics: Num rows: 9557 Data size: 2054789 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639 - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) + expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0D) (type: double), (- 
cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639D - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_11.q.out b/ql/src/test/results/clientpositive/spark/vectorization_11.q.out index 3d881a40f9..35f032b2ae 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_11.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_11.q.out @@ -57,7 +57,7 @@ STAGE PLANS: predicate: ((cstring2 = cstring1) or (ctimestamp1 is null and (cstring1 like '%a'))) (type: boolean) Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0) (type: double), (cdouble * -5638.15) (type: double) + expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639D) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0D) (type: double), (cdouble * -5638.15D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_12.q.out b/ql/src/test/results/clientpositive/spark/vectorization_12.q.out index 591de4b388..ca76191a42 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_12.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_12.q.out @@ -157,7 +157,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1877 Data size: 403561 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0 * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0 * _col0) / -6432.0) (type: double), (- ((-6432.0 * _col0) / -6432.0)) (type: double), _col6 (type: double), (- (-6432.0 * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0 * _col0)) (type: double), (- (- ((-6432.0 * _col0) / -6432.0))) (type: double), (((-6432.0 * _col0) / -6432.0) + (- (-6432.0 * _col0))) (type: double), _col8 (type: double) + expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0D * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0D * _col0) / -6432.0D) (type: double), (- ((-6432.0D * _col0) / -6432.0D)) (type: double), _col6 (type: double), (- (-6432.0D * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0D * 
_col0)) (type: double), (- (- ((-6432.0D * _col0) / -6432.0D))) (type: double), (((-6432.0D * _col0) / -6432.0D) + (- (-6432.0D * _col0))) (type: double), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19 Statistics: Num rows: 1877 Data size: 403561 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out index e6c1f122c9..733eca4c65 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out @@ -93,7 +93,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 11.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 12.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > 11.0D) and (UDFToDouble(ctimestamp2) <> 12.0D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -159,7 +159,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), 
_col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -416,7 +416,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -1.388)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val -1.3359999999999999)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4)))) - predicate: (((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean) + predicate: (((UDFToDouble(ctimestamp1) > -1.388D) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean) Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -474,7 +474,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) + expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), 
(-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out index 6fa51e53f7..749a8f0bf5 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out @@ -93,10 +93,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColLessEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterTimestampColLessTimestampColumn(col 9:timestamp, col 8:timestamp)), FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -257), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float))) - predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) + predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257L) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean) Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double) + expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Select Vectorization: className: VectorSelectOperator @@ -160,7 +160,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28 + _col2) (type: double), (- (-26.28 + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28 + _col2)) / 10.175) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28 + _col2)) / 10.175)) (type: double), (-1.389 % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) + expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 
(type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175D) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out index 3e7aa0aaa8..dde8c91563 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out @@ -89,7 +89,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %ss%), FilterStringColLikeStringScalar(col 6:string, pattern 10%), FilterExprAndExpr(children: FilterLongColGreaterEqualLongScalar(col 2:int, val -75), FilterLongColEqualLongColumn(col 0:smallint, col 1:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -3728.0))) - predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) + predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0D)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean) @@ -154,7 +154,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) + expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), 
_col10 (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out index 24cae48890..f8b54f3a6a 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out @@ -66,7 +66,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -132,7 +132,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out index 851c1806b5..1516b6bef3 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out @@ -74,10 +74,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -23), FilterExprOrExpr(children: FilterDoubleColNotEqualDoubleScalar(col 5:double, val 988888.0), FilterDecimalColGreaterDecimalScalar(col 13:decimal(13,3), val 
-863.257)(children: CastLongToDecimal(col 2:int) -> 13:decimal(13,3))), FilterExprOrExpr(children: FilterLongColGreaterEqualLongScalar(col 0:tinyint, val 33), FilterLongColGreaterEqualLongColumn(col 1:bigint, col 3:bigint)(children: col 1:smallint), FilterDoubleColEqualDoubleColumn(col 4:double, col 5:double)(children: col 4:float))) - predicate: (((cdouble <> 988888.0) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23)) (type: boolean) + predicate: (((cdouble <> 988888.0D) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33Y) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23L)) (type: boolean) Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58 + (- (- cdouble))) (type: double) + expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58D + (- (- cdouble))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_2.q.out b/ql/src/test/results/clientpositive/spark/vectorization_2.q.out index f11854c39c..76085573a7 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_2.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_2.q.out @@ -72,7 +72,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessTimestampColumn(col 8:timestamp, col 9:timestamp), FilterStringColLikeStringScalar(col 7:string, pattern b%), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -5638.14990234375)), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterDoubleScalarNotEqualDoubleColumn(val -10669.0, col 13:double)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterLongScalarGreaterLongColumn(val 359, col 2:int)))) - predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0 <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) + predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0D <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < 
ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean) Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cbigint (type: bigint), cfloat (type: float), cdouble (type: double) @@ -133,7 +133,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 % -563.0) (type: double), (_col0 + 762.0) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) + expressions: _col0 (type: double), (_col0 % -563.0D) (type: double), (_col0 + 762.0D) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0D) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_3.q.out b/ql/src/test/results/clientpositive/spark/vectorization_3.q.out index 253e12cf19..fb66369f68 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_3.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_3.q.out @@ -77,7 +77,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 14:decimal(22,3))(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterDoubleColEqualDoubleScalar(col 13:double, val -29071.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterDecimalScalarLessEqualDecimalColumn(val 79.553, col 15:decimal(8,3))(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(8,3)), FilterTimestampColGreaterTimestampColumn(col 8:timestamp, col 9:timestamp))) - predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0))) (type: boolean) + predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0D))) (type: boolean) Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: 
smallint), cint (type: int), cfloat (type: float) @@ -138,7 +138,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 - 10.175) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175)) (type: double), (- _col1) (type: double), (_col0 % 79.553) (type: double), (- (_col0 * (_col0 - 10.175))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175))) / (_col0 - 10.175)) (type: double), (- (_col0 - 10.175)) (type: double), _col4 (type: double), (-3728.0 - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) + expressions: _col0 (type: double), (_col0 - 10.175D) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175D)) (type: double), (- _col1) (type: double), (_col0 % 79.553D) (type: double), (- (_col0 * (_col0 - 10.175D))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175D))) / (_col0 - 10.175D)) (type: double), (- (_col0 - 10.175D)) (type: double), _col4 (type: double), (-3728.0D - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_4.q.out b/ql/src/test/results/clientpositive/spark/vectorization_4.q.out index 436fdd9efc..626802577f 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_4.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_4.q.out @@ -72,7 +72,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterEqualLongColumn(col 1:int, col 2:int)(children: col 1:smallint), FilterExprAndExpr(children: FilterLongScalarGreaterEqualLongColumn(val -89010, col 0:int)(children: col 0:tinyint), FilterDoubleColGreaterDoubleScalar(col 5:double, val 79.553)), FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -563, col 3:bigint), FilterExprOrExpr(children: FilterLongColNotEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterDoubleScalarGreaterEqualDoubleColumn(val -3728.0, col 5:double)))) - predicate: (((-563 <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0 >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) + predicate: (((-563L <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0D >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553D)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cdouble (type: double) @@ -133,7 +133,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: bigint), (_col0 * -563) (type: bigint), (-3728 + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563) % 
_col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2)) (type: double), ((-3728 + _col0) - (_col0 * -563)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2))) (type: double) + expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2)) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_5.q.out b/ql/src/test/results/clientpositive/spark/vectorization_5.q.out index 6b3b9b0222..03e4ca996d 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_5.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_5.q.out @@ -143,7 +143,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0 % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0D % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_6.q.out b/ql/src/test/results/clientpositive/spark/vectorization_6.q.out index b16d7b0645..bb4ac9dcf8 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_6.q.out @@ -66,10 +66,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 10:boolean, val 0), FilterLongColGreaterEqualLongColumn(col 11:boolean, col 10:boolean)), FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:bigint), FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %a), 
FilterDoubleColLessEqualDoubleScalar(col 4:float, val -257.0))))) - predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0)) (type: boolean) + predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 11605 Data size: 2495116 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28 / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) + expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28D / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out index 24cae48890..f8b54f3a6a 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out @@ -66,7 +66,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -132,7 +132,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / 
_col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out index 530bdf6069..e679a75086 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out @@ -25,7 +25,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: (UDFToDouble(cint) / 0.0) (type: double), (UDFToDouble(ctinyint) / 0.0) (type: double), (UDFToDouble(cbigint) / 0.0) (type: double), (cdouble / 0.0) (type: double) + expressions: (UDFToDouble(cint) / 0.0D) (type: double), (UDFToDouble(ctinyint) / 0.0D) (type: double), (UDFToDouble(cbigint) / 0.0D) (type: double), (cdouble / 0.0D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator @@ -209,7 +209,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val 0), FilterLongColLessLongScalar(col 3:bigint, val 100000000)) - predicate: ((cbigint < 100000000) and (cbigint > 0)) (type: boolean) + predicate: ((cbigint < 100000000L) and (cbigint > 0L)) (type: boolean) Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (cbigint - 988888) (type: bigint), (cdouble / UDFToDouble((cbigint - 988888))) (type: double), (1.2 / CAST( (cbigint - 988888) AS decimal(19,0))) (type: decimal(22,21)) @@ -425,10 +425,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -500.0), FilterDoubleColLessDoubleScalar(col 5:double, val -199.0)) - predicate: ((cdouble < -199.0) and (cdouble >= -500.0)) (type: boolean) + predicate: ((cdouble < -199.0D) and (cdouble >= -500.0D)) (type: boolean) Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 200.0) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0)) (type: double), ((cdouble + 200.0) / (cdouble + 200.0)) (type: double), (3.0 / (cdouble + 200.0)) (type: double), (1.2 / (cdouble + 200.0)) (type: double) + expressions: (cdouble + 200.0D) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0D)) (type: double), ((cdouble + 200.0D) / (cdouble + 200.0D)) (type: double), (3.0D / (cdouble + 200.0D)) (type: double), (1.2D / (cdouble + 200.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col4, _col5 Select Vectorization: className: VectorSelectOperator @@ -641,7 +641,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 2:int, val 500000000), FilterDoubleColGreaterDoubleScalar(col 5:double, val 1.0E9), FilterLongColEqualLongScalar(col 0:tinyint, val 0)) - predicate: ((cdouble > 1.0E9) or (cint > 500000000) or (ctinyint = 0)) (type: boolean) + predicate: ((cdouble > 1.0E9D) or (cint > 500000000) or (ctinyint = 0Y)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select 
Operator expressions: cint (type: int), cbigint (type: bigint), ctinyint (type: tinyint), (cint / (cint - 528534767)) (type: double), (cbigint / (cbigint - 1018195815)) (type: double), (ctinyint / ctinyint) (type: double), (cint % (cint - 528534767)) (type: int), (cbigint % (cbigint - 1018195815)) (type: bigint), (ctinyint % ctinyint) (type: tinyint) diff --git a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out index 6be55bdb9b..53e8bbf0ac 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out @@ -71,7 +71,7 @@ STAGE PLANS: alias: alltypesorc_part Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 2.0) (type: double) + expressions: (cdouble + 2.0D) (type: double) outputColumnNames: _col0 Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out index 9683efa1d4..339ba9e519 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out @@ -99,7 +99,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongScalarEqualLongColumn(val 762, col 3:bigint), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 1:smallint) -> 13:float), FilterDoubleColGreaterDoubleScalar(col 13:double, val -5.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 2:int) -> 13:double)), FilterStringGroupColEqualStringScalar(col 6:string, val a), FilterExprAndExpr(children: FilterDecimalColLessEqualDecimalScalar(col 14:decimal(22,3), val -1.389)(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterStringGroupColNotEqualStringScalar(col 7:string, val a), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 15:decimal(13,3))(children: CastLongToDecimal(col 2:int) -> 15:decimal(13,3)), FilterLongColNotEqualLongColumn(col 11:boolean, col 10:boolean))) - predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (762 = cbigint) or (cstring1 = 'a')) (type: boolean) + predicate: (((CAST( cbigint AS decimal(22,3)) <= -1.389) and (cstring2 <> 'a') and (79.553 <> CAST( cint AS decimal(13,3))) and (cboolean2 <> cboolean1)) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0D) and (cdouble <> UDFToDouble(cint))) or (762L = cbigint) or (cstring1 = 'a')) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float), cdouble (type: double) @@ -152,7 +152,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data 
size: 492 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 + -3728.0) (type: double), (- (_col0 + -3728.0)) (type: double), (- (- (_col0 + -3728.0))) (type: double), ((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0))) * (_col0 + -3728.0)) * (- (- (_col0 + -3728.0)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175 - _col4) (type: double), (- (10.175 - _col4)) (type: double), ((- _col2) / -563.0) (type: double), _col6 (type: double), (- ((- _col2) / -563.0)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0)) (type: double), (- (_col0 / _col1)) (type: double) + expressions: _col0 (type: double), (_col0 + -3728.0D) (type: double), (- (_col0 + -3728.0D)) (type: double), (- (- (_col0 + -3728.0D))) (type: double), ((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) (type: double), _col1 (type: double), (- _col0) (type: double), _col2 (type: double), (((- (- (_col0 + -3728.0D))) * (_col0 + -3728.0D)) * (- (- (_col0 + -3728.0D)))) (type: double), _col3 (type: double), (- _col2) (type: double), (_col2 - (- (- (_col0 + -3728.0D)))) (type: double), ((_col2 - (- (- (_col0 + -3728.0D)))) * _col2) (type: double), _col4 (type: double), _col5 (type: double), (10.175D - _col4) (type: double), (- (10.175D - _col4)) (type: double), ((- _col2) / -563.0D) (type: double), _col6 (type: double), (- ((- _col2) / -563.0D)) (type: double), (_col0 / _col1) (type: double), _col7 (type: tinyint), _col8 (type: bigint), (UDFToDouble(_col7) / ((- _col2) / -563.0D)) (type: double), (- (_col0 / _col1)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24 Statistics: Num rows: 1 Data size: 492 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -341,7 +341,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 3:bigint, val 197), FilterLongColLessLongColumn(col 2:bigint, col 3:bigint)(children: col 2:int)), FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -26.28), FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 1:smallint) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 0:tinyint) -> 13:float), FilterStringColRegExpStringScalar(col 6:string, pattern .*ss.*)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 4:float, val 79.5530014038086), FilterStringColLikeStringScalar(col 7:string, pattern 10%))) - predicate: (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean) + predicate: (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cbigint <= 197L) and (UDFToLong(cint) < cbigint)) or ((cdouble >= 
-26.28D) and (UDFToDouble(csmallint) > cdouble)) or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean) Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cdouble (type: double) @@ -394,7 +394,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 420 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), (UDFToDouble(_col0) / -3728.0) (type: double), (_col0 * -3728) (type: int), _col1 (type: double), (- (_col0 * -3728)) (type: int), _col2 (type: double), (-563 % (_col0 * -3728)) (type: int), (_col1 / _col2) (type: double), (- _col2) (type: double), _col3 (type: double), _col4 (type: double), (_col2 - 10.175) (type: double), _col5 (type: int), (UDFToDouble((_col0 * -3728)) % (_col2 - 10.175)) (type: double), (- _col3) (type: double), _col6 (type: double), (_col3 % -26.28) (type: double), _col7 (type: double), (- (UDFToDouble(_col0) / -3728.0)) (type: double), ((- (_col0 * -3728)) % (-563 % (_col0 * -3728))) (type: int), ((UDFToDouble(_col0) / -3728.0) - _col4) (type: double), (- (_col0 * -3728)) (type: int), _col8 (type: double) + expressions: _col0 (type: int), (UDFToDouble(_col0) / -3728.0D) (type: double), (_col0 * -3728) (type: int), _col1 (type: double), (- (_col0 * -3728)) (type: int), _col2 (type: double), (-563 % (_col0 * -3728)) (type: int), (_col1 / _col2) (type: double), (- _col2) (type: double), _col3 (type: double), _col4 (type: double), (_col2 - 10.175D) (type: double), _col5 (type: int), (UDFToDouble((_col0 * -3728)) % (_col2 - 10.175D)) (type: double), (- _col3) (type: double), _col6 (type: double), (_col3 % -26.28D) (type: double), _col7 (type: double), (- (UDFToDouble(_col0) / -3728.0D)) (type: double), ((- (_col0 * -3728)) % (-563 % (_col0 * -3728))) (type: int), ((UDFToDouble(_col0) / -3728.0D) - _col4) (type: double), (- (_col0 * -3728)) (type: int), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22 Statistics: Num rows: 1 Data size: 420 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -628,7 +628,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (- _col0) (type: double), (_col0 - (- _col0)) (type: double), _col1 (type: bigint), (CAST( _col1 AS decimal(19,0)) % 79.553) (type: decimal(5,3)), _col2 (type: tinyint), (UDFToDouble(_col1) - (- _col0)) (type: double), (- (- _col0)) (type: double), (-1.0 % (- _col0)) (type: double), _col1 (type: bigint), (- _col1) (type: bigint), _col3 (type: double), (- (- (- _col0))) (type: double), (762 * (- _col1)) (type: bigint), _col4 (type: int), (UDFToLong(_col2) + (762 * (- _col1))) (type: bigint), ((- _col0) + UDFToDouble(_col4)) (type: double), _col5 (type: double), ((- _col1) % _col1) (type: bigint), _col6 (type: bigint), _col7 (type: double), (-3728 % (UDFToLong(_col2) + (762 * (- _col1)))) (type: bigint) + expressions: _col0 (type: double), (- _col0) (type: double), (_col0 - (- _col0)) (type: double), _col1 (type: bigint), (CAST( _col1 AS decimal(19,0)) % 79.553) (type: decimal(5,3)), _col2 (type: 
tinyint), (UDFToDouble(_col1) - (- _col0)) (type: double), (- (- _col0)) (type: double), (-1.0D % (- _col0)) (type: double), _col1 (type: bigint), (- _col1) (type: bigint), _col3 (type: double), (- (- (- _col0))) (type: double), (762L * (- _col1)) (type: bigint), _col4 (type: int), (UDFToLong(_col2) + (762L * (- _col1))) (type: bigint), ((- _col0) + UDFToDouble(_col4)) (type: double), _col5 (type: double), ((- _col1) % _col1) (type: bigint), _col6 (type: bigint), _col7 (type: double), (-3728L % (UDFToLong(_col2) + (762L * (- _col1)))) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -788,7 +788,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessEqualTimestampColumn(col 9:timestamp, col 8:timestamp), FilterDoubleColNotEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterStringScalarLessEqualStringGroupColumn(val ss, col 6:string)), FilterExprAndExpr(children: FilterLongColLessLongColumn(col 1:smallint, col 0:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double)), FilterDoubleColEqualDoubleScalar(col 4:float, val 17.0)) - predicate: (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or ((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or (cfloat = 17)) (type: boolean) + predicate: (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0D)) or ((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or (cfloat = 17)) (type: boolean) Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cbigint (type: bigint), cfloat (type: float) @@ -841,7 +841,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), (_col0 + 6981.0) (type: double), ((_col0 + 6981.0) + _col0) (type: double), _col1 (type: bigint), (((_col0 + 6981.0) + _col0) / _col0) (type: double), (- (_col0 + 6981.0)) (type: double), _col2 (type: double), (_col0 % (- (_col0 + 6981.0))) (type: double), _col3 (type: double), _col4 (type: double), (- _col1) (type: bigint), (UDFToDouble((- _col1)) / _col2) (type: double), _col5 (type: float), (_col4 * -26.28) (type: double) + expressions: _col0 (type: double), (_col0 + 6981.0D) (type: double), ((_col0 + 6981.0D) + _col0) (type: double), _col1 (type: bigint), (((_col0 + 6981.0D) + _col0) / _col0) (type: double), (- (_col0 + 6981.0D)) (type: double), _col2 (type: double), (_col0 % (- (_col0 + 6981.0D))) (type: double), _col3 (type: double), _col4 (type: double), (- _col1) (type: bigint), (UDFToDouble((- _col1)) / _col2) (type: double), _col5 (type: float), (_col4 * -26.28D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE File Output Operator 
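The hunks above and below follow one uniform pattern: in the updated plans every double constant is printed with a D suffix (10.0D, -26.28D, -3728.0D), every bigint constant with an L suffix (-257L, 762L, 100000000L), and every tinyint constant with a Y suffix (0Y, 33Y), while decimal-typed constants such as 79.553 and -863.257 keep their plain rendering. A minimal HiveQL sketch of the literal notation these plans echo, run against the same alltypesorc table the tests use (the aliases are illustrative only; Hive also accepts S for SMALLINT and BD for DECIMAL):

    SELECT 100Y   AS c_tinyint,   -- Y: TINYINT,  cf. (ctinyint <> 0Y)
           100S   AS c_smallint,  -- S: SMALLINT
           100L   AS c_bigint,    -- L: BIGINT,   cf. (cbigint > -257L)
           10.0D  AS c_double,    -- D: DOUBLE,   cf. (cdouble > 2563.58D)
           10.0BD AS c_decimal    -- BD: DECIMAL
    FROM alltypesorc LIMIT 1;

Because only the rendering of typed constants changes, sub-expressions that involve decimals, e.g. (CAST( cbigint AS decimal(22,3)) <= -1.389), appear unchanged inside the rewritten predicate lines of these hunks.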
@@ -1012,7 +1012,7 @@ STAGE PLANS: predicate: (((1 <> cboolean2) and (CAST( csmallint AS decimal(8,3)) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint)) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or (cstring1 regexp 'a.*' and (cstring2 like '%ss%'))) (type: boolean) Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - CAST( cint AS decimal(10,0))) (type: decimal(14,3)), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - CAST( cint AS decimal(10,0))) - -26.28) (type: decimal(15,3)), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / CAST( ctinyint AS decimal(3,0))) (type: decimal(9,7)) + expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728L * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - CAST( cint AS decimal(10,0))) (type: decimal(14,3)), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - CAST( cint AS decimal(10,0))) - -26.28) (type: decimal(15,3)), (- cfloat) (type: float), (cdouble * -89010.0D) (type: double), (UDFToDouble(ctinyint) / 988888.0D) (type: double), (- ctinyint) (type: tinyint), (79.553 / CAST( ctinyint AS decimal(3,0))) (type: decimal(9,7)) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22 Select Vectorization: className: VectorSelectOperator @@ -1304,10 +1304,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongScalarGreaterLongColumn(val 197, col 0:int)(children: col 0:tinyint), FilterLongColEqualLongColumn(col 2:bigint, col 3:bigint)(children: col 2:int)), FilterLongColEqualLongScalar(col 3:bigint, val 359), FilterLongColLessLongScalar(col 10:boolean, val 0), FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 6:string, pattern %ss), FilterDoubleColLessEqualDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 0:tinyint) -> 13:float))) - predicate: (((197 > UDFToInteger(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint))) or (cbigint = 359) or (cboolean1 < 0)) (type: boolean) + predicate: (((197 > UDFToInteger(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint))) or (cbigint = 359L) or (cboolean1 < 0)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 
Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (CAST( cbigint AS decimal(19,0)) % 79.553) (type: decimal(5,3)), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % cfloat) (type: float), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint) + expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (CAST( cbigint AS decimal(19,0)) % 79.553) (type: decimal(5,3)), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % cfloat) (type: float), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569L % cbigint) (type: bigint), (359.0D - cdouble) (type: double), (- csmallint) (type: smallint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24 Select Vectorization: className: VectorSelectOperator @@ -1853,7 +1853,7 @@ STAGE PLANS: predicate: (((-1.389 >= CAST( cint AS decimal(13,3))) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > CAST( cbigint AS decimal(22,3))))) (type: boolean) Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double) + expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0D) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175D) (type: double), (UDFToDouble((-6432.0 * cfloat)) / 
UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Select Vectorization: className: VectorSelectOperator @@ -2152,7 +2152,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1251 Data size: 268968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) % -75) (type: int), _col1 (type: double), (-1.389 / CAST( _col0 AS decimal(5,0))) (type: decimal(10,9)), _col2 (type: bigint), (UDFToDouble((UDFToInteger(_col0) % -75)) / UDFToDouble(_col2)) (type: double), (- (UDFToInteger(_col0) % -75)) (type: int), _col3 (type: double), (- (- (UDFToInteger(_col0) % -75))) (type: int), _col4 (type: bigint), (_col4 - -89010) (type: bigint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) % -75) (type: int), _col1 (type: double), (-1.389 / CAST( _col0 AS decimal(5,0))) (type: decimal(10,9)), _col2 (type: bigint), (UDFToDouble((UDFToInteger(_col0) % -75)) / UDFToDouble(_col2)) (type: double), (- (UDFToInteger(_col0) % -75)) (type: int), _col3 (type: double), (- (- (UDFToInteger(_col0) % -75))) (type: int), _col4 (type: bigint), (_col4 - -89010L) (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1251 Data size: 268968 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -2349,7 +2349,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 2563.58), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColGreaterEqualLongColumn(col 3:bigint, col 2:bigint)(children: col 2:int), FilterLongColLessLongColumn(col 1:int, col 2:int)(children: col 1:smallint), FilterDoubleColLessDoubleScalar(col 4:float, val -5638.14990234375)), FilterDecimalScalarEqualDecimalColumn(val 2563.58, col 13:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 13:decimal(6,2)), FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 5:double, col 14:double)(children: CastLongToDouble(col 3:bigint) -> 14:double), FilterDecimalScalarGreaterDecimalColumn(val -5638.15, col 15:decimal(21,2))(children: CastLongToDecimal(col 3:bigint) -> 15:decimal(21,2))))) - predicate: ((((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (cfloat < -5638.15)) or (2563.58 = CAST( ctinyint AS decimal(6,2))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > CAST( cbigint AS decimal(21,2))))) and (cdouble > 2563.58)) (type: boolean) + predicate: ((((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (cfloat < -5638.15)) or (2563.58 = CAST( ctinyint AS decimal(6,2))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > CAST( cbigint AS decimal(21,2))))) and (cdouble > 2563.58D)) (type: boolean) Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cfloat (type: float), cdouble (type: double) @@ -2407,7 +2407,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: double), _col1 (type: double), (2563.58 * _col1) 
(type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double) + expressions: _col0 (type: double), _col1 (type: double), (2563.58D * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58D * _col1) + -5638.15D) (type: double), ((- _col1) * ((2563.58D * _col1) + -5638.15D)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0D) (type: double), _col6 (type: double), (-863.257D % (_col0 * 762.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -2648,7 +2648,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColNotEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -257, col 0:int)(children: col 0:tinyint), SelectColumnIsNotNull(col 11:boolean), FilterStringColRegExpStringScalar(col 6:string, pattern .*ss), FilterDoubleScalarLessDoubleColumn(val -3.0, col 13:double)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double)), FilterDoubleColEqualDoubleScalar(col 13:double, val -5.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterStringColLikeStringScalar(col 7:string, pattern %b%)), FilterDoubleColEqualDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterExprAndExpr(children: SelectColumnIsNull(col 10:boolean), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float)))) - predicate: ((((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))) and (UDFToDouble(ctimestamp1) <> 0.0)) (type: boolean) + predicate: ((((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0D < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0D) or ((UDFToDouble(ctimestamp1) < 0.0D) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))) and (UDFToDouble(ctimestamp1) <> 0.0D)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -2706,7 +2706,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, 
_col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / CAST( (- _col5) AS decimal(3,0))) (type: decimal(8,6)), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double) + expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175D) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28D - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28D - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175D)) (type: double), _col6 (type: double), (_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175D / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175D))) (type: double), _col10 (type: double), (((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175D) (type: double), (10.175D % (10.175D / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28D - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / CAST( (- _col5) AS decimal(3,0))) (type: decimal(8,6)), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28D - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28D) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, 
_col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -3086,7 +3086,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 5119 Data size: 1100602 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: boolean), _col1 (type: float), (- _col1) (type: float), (-26.28 / UDFToDouble(_col1)) (type: double), _col2 (type: bigint), (CAST( _col2 AS decimal(19,0)) - 10.175) (type: decimal(23,3)), _col3 (type: double), (_col3 % UDFToDouble(_col1)) (type: double), (10.175 + (- _col1)) (type: float), _col4 (type: double), (UDFToDouble((CAST( _col2 AS decimal(19,0)) - 10.175)) + _col3) (type: double), _col5 (type: bigint), _col6 (type: double), (- (10.175 + (- _col1))) (type: float), (79.553 / _col6) (type: double), (_col3 % (79.553 / _col6)) (type: double), _col7 (type: bigint), _col8 (type: double), (-1.389 * CAST( _col5 AS decimal(19,0))) (type: decimal(24,3)), (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0)))) (type: decimal(25,3)), _col9 (type: double), (- (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0))))) (type: decimal(25,3)), _col10 (type: double), (- _col10) (type: double), (_col10 * UDFToDouble(_col7)) (type: double) + expressions: _col0 (type: boolean), _col1 (type: float), (- _col1) (type: float), (-26.28D / UDFToDouble(_col1)) (type: double), _col2 (type: bigint), (CAST( _col2 AS decimal(19,0)) - 10.175) (type: decimal(23,3)), _col3 (type: double), (_col3 % UDFToDouble(_col1)) (type: double), (10.175 + (- _col1)) (type: float), _col4 (type: double), (UDFToDouble((CAST( _col2 AS decimal(19,0)) - 10.175)) + _col3) (type: double), _col5 (type: bigint), _col6 (type: double), (- (10.175 + (- _col1))) (type: float), (79.553D / _col6) (type: double), (_col3 % (79.553D / _col6)) (type: double), _col7 (type: bigint), _col8 (type: double), (-1.389 * CAST( _col5 AS decimal(19,0))) (type: decimal(24,3)), (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0)))) (type: decimal(25,3)), _col9 (type: double), (- (CAST( _col7 AS decimal(19,0)) - (-1.389 * CAST( _col5 AS decimal(19,0))))) (type: decimal(25,3)), _col10 (type: double), (- _col10) (type: double), (_col10 * UDFToDouble(_col7)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25 Statistics: Num rows: 5119 Data size: 1100602 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out index 8fff1ed7ba..5380c9c7c6 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out @@ -60,10 +60,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1:smallint, val 418), FilterLongColEqualLongScalar(col 1:smallint, val 12205), FilterLongColEqualLongScalar(col 1:smallint, val 
10583)) - predicate: ((csmallint = 10583) or (csmallint = 12205) or (csmallint = 418)) (type: boolean) + predicate: ((csmallint = 10583S) or (csmallint = 12205S) or (csmallint = 418S)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string) + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -208,10 +208,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1:smallint, val 418), FilterLongColEqualLongScalar(col 1:smallint, val 12205), FilterLongColEqualLongScalar(col 1:smallint, val 10583)) - predicate: ((csmallint = 10583) or (csmallint = 12205) or (csmallint = 418)) (type: boolean) + predicate: ((csmallint = 10583S) or (csmallint = 12205S) or (csmallint = 418S)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN (null) ELSE ('c') END (type: string) + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN (null) ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -840,7 +840,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE ((attr + 2)) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN ((attr + 1L)) ELSE ((attr + 2L)) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -920,7 +920,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN (null) ELSE ((attr + 2)) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN (null) ELSE ((attr + 2L)) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -1000,7 +1000,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (null) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN ((attr + 1L)) ELSE (null) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out index e1faa53778..0cfc14b159 100644 --- 
a/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out @@ -127,10 +127,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 13:bigint, val 0)(children: LongColModuloLongScalar(col 3:bigint, val 500) -> 13:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 14:double, val -1.0)(children: FuncSinDoubleToDouble(col 4:float) -> 14:double)) - predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean) + predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0D)) (type: boolean) Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) (type: double) + expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0D)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out index a5575f5ba5..2fccd289a4 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out @@ -1100,7 +1100,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 672 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: round(_col0, 0) (type: double), _col1 BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 (type: boolean), _col2 BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 (type: boolean), _col3 BETWEEN 9.20684592523616E19 AND 9.20684592523617E19 (type: boolean), round(_col4, 3) (type: double), round(_col5, 3) (type: double), round(_col6, 3) (type: double), round(_col7, 3) (type: double) + expressions: round(_col0, 0) (type: double), _col1 BETWEEN 8.97077295279421E19D AND 8.97077295279422E19D (type: boolean), _col2 BETWEEN 8.97077295279421E19D AND 8.97077295279422E19D (type: boolean), _col3 BETWEEN 9.20684592523616E19D AND 9.20684592523617E19D (type: boolean), round(_col4, 3) (type: double), round(_col5, 3) (type: double), round(_col6, 3) (type: double), round(_col7, 3) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 672 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out index 55db5754c8..7a9cc12dbb 100644 --- a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out +++ b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out @@ -79,7 +79,7 @@ STAGE PLANS: alias: s/c Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean) + predicate: ((UDFToDouble(key) < 100.0D) and (UDFToDouble(key) > 80.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/stats_ppr_all.q.out b/ql/src/test/results/clientpositive/stats_ppr_all.q.out index 4b3f62015c..b7256e1ae5 100644 --- a/ql/src/test/results/clientpositive/stats_ppr_all.q.out +++ b/ql/src/test/results/clientpositive/stats_ppr_all.q.out @@ -132,7 +132,7 @@ STAGE PLANS: alias: ss Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL Filter Operator - predicate: (UDFToDouble((((year * 10000) + (month * 100)) + day)) = 2015010.0) (type: boolean) + predicate: (UDFToDouble((((year * 10000) + (month * 100)) + day)) = 2015010.0D) (type: boolean) Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: order_amount (type: float) @@ -231,7 +231,7 @@ STAGE PLANS: alias: ss Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (UDFToDouble(((201500 + (month * 10)) + day)) > 201511.0) (type: boolean) + predicate: 
(UDFToDouble(((201500 + (month * 10)) + day)) > 201511.0D) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: order_amount (type: float) @@ -282,7 +282,7 @@ STAGE PLANS: alias: ss Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (UDFToDouble(((201500 + (month * 10)) + day)) > 201511.0) (type: boolean) + predicate: (UDFToDouble(((201500 + (month * 10)) + day)) > 201511.0D) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: '1' (type: string) diff --git a/ql/src/test/results/clientpositive/subq.q.out b/ql/src/test/results/clientpositive/subq.q.out index 4b1e707457..fe7bba54b0 100644 --- a/ql/src/test/results/clientpositive/subq.q.out +++ b/ql/src/test/results/clientpositive/subq.q.out @@ -27,7 +27,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/subq2.q.out b/ql/src/test/results/clientpositive/subq2.q.out index 213388ca83..360d18d3c2 100644 --- a/ql/src/test/results/clientpositive/subq2.q.out +++ b/ql/src/test/results/clientpositive/subq2.q.out @@ -20,7 +20,7 @@ STAGE PLANS: alias: b Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) >= 90.0) (type: boolean) + predicate: (UDFToDouble(key) >= 90.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() diff --git a/ql/src/test/results/clientpositive/subquery_alias.q.out b/ql/src/test/results/clientpositive/subquery_alias.q.out index a304512283..fd06f36363 100644 --- a/ql/src/test/results/clientpositive/subquery_alias.q.out +++ b/ql/src/test/results/clientpositive/subquery_alias.q.out @@ -27,7 +27,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out index 381550a006..e3f3b1a6c9 100644 --- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out @@ -97,12 +97,12 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = 0) (type: boolean) + predicate: (_col0 = 0L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: 0 (type: bigint) + keys: 0L (type: bigint) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE @@ -630,12 +630,12 
@@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = 0) (type: boolean) + predicate: (_col0 = 0L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: 0 (type: bigint) + keys: 0L (type: bigint) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.out index 74239031f6..4adad38b39 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.out @@ -118,7 +118,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 275 Data size: 7596 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) Statistics: Num rows: 182 Data size: 5027 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) @@ -331,7 +331,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col3, _col4, _col7 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col3 = 0)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col3 = 0L)) THEN (false) WHEN (_col3 is null) THEN (false) WHEN (_col7 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col4 < _col3)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 8 Data size: 1014 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) @@ -375,7 +375,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col2 - _col1) > 600.0) and _col1 is not null) (type: boolean) + predicate: (((_col2 - _col1) > 600.0D) and _col1 is not null) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) @@ -448,7 +448,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col2 - _col1) > 600.0) and _col1 is not null) (type: boolean) + predicate: (((_col2 - _col1) > 600.0D) and _col1 is not null) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) @@ -623,7 +623,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col5 Statistics: Num rows: 14 Data size: 2087 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean) + predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and 
(_col3 >= _col2))) (type: boolean) Statistics: Num rows: 9 Data size: 1341 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) @@ -667,7 +667,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 - _col2) > 600.0) (type: boolean) + predicate: ((_col1 - _col2) > 600.0D) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -736,7 +736,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 - _col2) > 600.0) (type: boolean) + predicate: ((_col1 - _col2) > 600.0D) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), true (type: boolean) @@ -920,7 +920,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4 Statistics: Num rows: 2 Data size: 209 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = 0) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) + predicate: ((_col1 = 0L) or (_col4 is null and _col0 is not null and (_col2 >= _col1))) (type: boolean) Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) @@ -1102,7 +1102,7 @@ STAGE PLANS: outputColumnNames: _col0, _col2, _col3, _col5 Statistics: Num rows: 4 Data size: 343 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col2 = 0)) THEN (false) WHEN (_col2 is null) THEN (false) WHEN (_col5 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col3 < _col2)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col2 = 0L)) THEN (false) WHEN (_col2 is null) THEN (false) WHEN (_col5 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col3 < _col2)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 2 Data size: 171 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) diff --git a/ql/src/test/results/clientpositive/subquery_unqual_corr_expr.q.out b/ql/src/test/results/clientpositive/subquery_unqual_corr_expr.q.out index 2507d52c0f..422c8c0648 100644 --- a/ql/src/test/results/clientpositive/subquery_unqual_corr_expr.q.out +++ b/ql/src/test/results/clientpositive/subquery_unqual_corr_expr.q.out @@ -127,12 +127,12 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = 0) (type: boolean) + predicate: (_col0 = 0L) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: 0 (type: bigint) + keys: 0L (type: bigint) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index bfb5d2b0a6..da66ce3985 100644 --- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ 
b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -438,7 +438,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8 Statistics: Num rows: 30 Data size: 3807 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (not CASE WHEN ((_col4 = 0)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) + predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN (_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) (type: boolean) Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col0 (type: string), _col2 (type: int) diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out index 50c53d0972..b7d5b40d10 100644 --- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out +++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out @@ -446,10 +446,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363) and (t < 100)) (type: boolean) + filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2098 Data size: 41920 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b = 4294967363) and (t < 100)) (type: boolean) + predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: t (type: tinyint), si (type: smallint), i (type: int) @@ -515,10 +515,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363) and (t < 100)) (type: boolean) + filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2098 Data size: 41920 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b = 4294967363) and (t < 100)) (type: boolean) + predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), t (type: tinyint), si (type: smallint), i (type: int) @@ -587,10 +587,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over10k_orc_bucketed - filterExpr: ((b = 4294967363) and (t < 100)) (type: boolean) + filterExpr: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2098 Data size: 706986 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((b = 4294967363) and (t < 100)) (type: boolean) + predicate: ((b = 4294967363L) and (t < 100Y)) (type: boolean) Statistics: Num rows: 2 Data size: 674 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: ROW__ID (type: struct), t (type: tinyint), si (type: smallint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary) @@ -607,7 +607,7 @@ STAGE PLANS: Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), 0 (type: int), 4294967363 (type: bigint), VALUE._col3 
(type: float), VALUE._col4 (type: double), VALUE._col5 (type: boolean), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: decimal(4,2)), VALUE._col9 (type: binary) + expressions: KEY.reducesinkkey0 (type: struct), VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), 0 (type: int), 4294967363L (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: boolean), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: decimal(4,2)), VALUE._col9 (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 2 Data size: 834 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -706,7 +706,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col1 > 1) (type: boolean) + predicate: (_col1 > 1L) (type: boolean) Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out index 5cbd032bb4..e7966cb18a 100644 --- a/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out +++ b/ql/src/test/results/clientpositive/tez/explainanalyze_1.q.out @@ -191,7 +191,7 @@ Stage-0 Select Operator [SEL_2] Output:["_col0"] Filter Operator [FIL_4] - predicate:(UDFToDouble(value) < 10.0) + predicate:(UDFToDouble(value) < 10.0D) TableScan [TS_0] Output:["key","value"] @@ -215,7 +215,7 @@ Stage-0 Select Operator [SEL_2] Output:["_col0"] Filter Operator [FIL_4] - predicate:(UDFToDouble(key) < 10.0) + predicate:(UDFToDouble(key) < 10.0D) TableScan [TS_0] Output:["key"] @@ -306,7 +306,7 @@ Stage-0 Select Operator [SEL_2] (rows=166/497 width=87) Output:["_col0"] Filter Operator [FIL_16] (rows=166/497 width=87) - predicate:(UDFToDouble(key) > 0.0) + predicate:(UDFToDouble(key) > 0.0D) TableScan [TS_0] (rows=500/500 width=87) default@src,a,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Map 4 [SIMPLE_EDGE] @@ -315,7 +315,7 @@ Stage-0 Select Operator [SEL_5] (rows=166/0 width=91) Output:["_col0"] Filter Operator [FIL_17] (rows=166/0 width=91) - predicate:(UDFToDouble(value) > 0.0) + predicate:(UDFToDouble(value) > 0.0D) TableScan [TS_3] (rows=500/500 width=91) default@src,b,Tbl:COMPLETE,Col:COMPLETE,Output:["value"] @@ -355,7 +355,7 @@ Stage-0 Select Operator [SEL_2] (rows=166/497 width=87) Output:["_col0"] Filter Operator [FIL_16] (rows=166/497 width=87) - predicate:(UDFToDouble(key) > 0.0) + predicate:(UDFToDouble(key) > 0.0D) TableScan [TS_0] (rows=500/500 width=87) default@src,a,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Map 4 [SIMPLE_EDGE] @@ -364,7 +364,7 @@ Stage-0 Select Operator [SEL_5] (rows=166/497 width=87) Output:["_col0"] Filter Operator [FIL_17] (rows=166/497 width=87) - predicate:(UDFToDouble(key) > 0.0) + predicate:(UDFToDouble(key) > 0.0D) TableScan [TS_3] (rows=500/500 width=87) default@src,b,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out index a1ea37a682..8da42f4ece 100644 --- a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out +++ b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out @@ -678,7 +678,7 @@ Stage-3 Select Operator [SEL_7] (rows=1/3 width=352) 
Output:["_col0","_col1","_col2","_col3","_col4"] Filter Operator [FIL_6] (rows=1/3 width=352) - predicate:(userid <= 13) + predicate:(userid <= 13L) TableScan [TS_0] (rows=1/15000 width=352) default@orc_merge5,orc_merge5,Tbl:COMPLETE,Col:NONE,Output:["userid","string1","subtype","decimal1","ts"] PARTITION_ONLY_SHUFFLE [RS_7] diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out index c6cf6803b5..f87ee43cce 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out @@ -520,7 +520,7 @@ Stage-3 Select Operator [SEL_2] (rows=1 width=352) Output:["_col0","_col1","_col2","_col3","_col4"] Filter Operator [FIL_4] (rows=1 width=352) - predicate:(userid <= 13) + predicate:(userid <= 13L) TableScan [TS_0] (rows=1 width=352) default@orc_merge5,orc_merge5,Tbl:COMPLETE,Col:NONE,Output:["userid","string1","subtype","decimal1","ts"] PARTITION_ONLY_SHUFFLE [RS_3] diff --git a/ql/src/test/results/clientpositive/tez/multi_count_distinct.q.out b/ql/src/test/results/clientpositive/tez/multi_count_distinct.q.out index 5f5a8523b8..8cf0ff44a6 100644 --- a/ql/src/test/results/clientpositive/tez/multi_count_distinct.q.out +++ b/ql/src/test/results/clientpositive/tez/multi_count_distinct.q.out @@ -51,7 +51,7 @@ Stage-0 SHUFFLE [RS_4] PartitionCols:_col0, _col1, _col2, _col3 Group By Operator [GBY_3] (rows=13 width=101) - Output:["_col0","_col1","_col2","_col3"],keys:_col0, _col1, _col2, 0 + Output:["_col0","_col1","_col2","_col3"],keys:_col0, _col1, _col2, 0L Select Operator [SEL_1] (rows=9 width=93) Output:["_col0","_col1","_col2"] TableScan [TS_0] (rows=9 width=93) @@ -131,7 +131,7 @@ Stage-0 SHUFFLE [RS_4] PartitionCols:_col0, _col1, _col2, _col3 Group By Operator [GBY_3] (rows=22 width=101) - Output:["_col0","_col1","_col2","_col3"],keys:_col0, _col1, _col2, 0 + Output:["_col0","_col1","_col2","_col3"],keys:_col0, _col1, _col2, 0L Select Operator [SEL_1] (rows=9 width=93) Output:["_col0","_col1","_col2"] TableScan [TS_0] (rows=9 width=93) diff --git a/ql/src/test/results/clientpositive/tez/tez-tag.q.out b/ql/src/test/results/clientpositive/tez/tez-tag.q.out index a006227d58..2ba1f6ed0e 100644 --- a/ql/src/test/results/clientpositive/tez/tez-tag.q.out +++ b/ql/src/test/results/clientpositive/tez/tez-tag.q.out @@ -273,7 +273,7 @@ Stage-0 Select Operator [SEL_17] (rows=8 width=175) Output:["_col1"] Filter Operator [FIL_38] (rows=8 width=175) - predicate:((UDFToDouble(key) < 0.0) and value is not null) + predicate:((UDFToDouble(key) < 0.0D) and value is not null) TableScan [TS_15] (rows=25 width=175) default@src1,c,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"] <-Reducer 2 [SIMPLE_EDGE] diff --git a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out index 4afe85c30c..b40d56c371 100644 --- a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out @@ -25,7 +25,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: (UDFToDouble(cint) / 0.0) (type: double), (UDFToDouble(ctinyint) / 0.0) (type: double), (UDFToDouble(cbigint) / 0.0) (type: double), (cdouble / 0.0) (type: double) + expressions: (UDFToDouble(cint) / 0.0D) (type: double), (UDFToDouble(ctinyint) / 0.0D) (type: double), (UDFToDouble(cbigint) / 0.0D) (type: double), (cdouble / 0.0D) (type: double) outputColumnNames: 
_col0, _col1, _col2, _col3 Select Vectorization: className: VectorSelectOperator @@ -210,7 +210,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val 0), FilterLongColLessLongScalar(col 3:bigint, val 100000000)) - predicate: ((cbigint < 100000000) and (cbigint > 0)) (type: boolean) + predicate: ((cbigint < 100000000L) and (cbigint > 0L)) (type: boolean) Statistics: Num rows: 1365 Data size: 16320 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: (cbigint - 988888) (type: bigint), (cdouble / UDFToDouble((cbigint - 988888))) (type: double), (1.2 / CAST( (cbigint - 988888) AS decimal(19,0))) (type: decimal(22,21)) @@ -427,10 +427,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -500.0), FilterDoubleColLessDoubleScalar(col 5:double, val -199.0)) - predicate: ((cdouble < -199.0) and (cdouble >= -500.0)) (type: boolean) + predicate: ((cdouble < -199.0D) and (cdouble >= -500.0D)) (type: boolean) Statistics: Num rows: 1365 Data size: 16320 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: (cdouble + 200.0) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0)) (type: double), ((cdouble + 200.0) / (cdouble + 200.0)) (type: double), (3.0 / (cdouble + 200.0)) (type: double), (1.2 / (cdouble + 200.0)) (type: double) + expressions: (cdouble + 200.0D) (type: double), (UDFToDouble(cbigint) / (cdouble + 200.0D)) (type: double), ((cdouble + 200.0D) / (cdouble + 200.0D)) (type: double), (3.0D / (cdouble + 200.0D)) (type: double), (1.2D / (cdouble + 200.0D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col4, _col5 Select Vectorization: className: VectorSelectOperator @@ -644,7 +644,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 2:int, val 500000000), FilterDoubleColGreaterDoubleScalar(col 5:double, val 1.0E9), FilterLongColEqualLongScalar(col 0:tinyint, val 0)) - predicate: ((cdouble > 1.0E9) or (cint > 500000000) or (ctinyint = 0)) (type: boolean) + predicate: ((cdouble > 1.0E9D) or (cint > 500000000) or (ctinyint = 0Y)) (type: boolean) Statistics: Num rows: 4193 Data size: 75144 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: cint (type: int), cbigint (type: bigint), ctinyint (type: tinyint), (cint / (cint - 528534767)) (type: double), (cbigint / (cbigint - 1018195815)) (type: double), (ctinyint / ctinyint) (type: double), (cint % (cint - 528534767)) (type: int), (cbigint % (cbigint - 1018195815)) (type: bigint), (ctinyint % ctinyint) (type: tinyint) diff --git a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out index 37c630a9e9..7f2216dcd3 100644 --- a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out @@ -250,7 +250,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct] Select Operator - expressions: ctinyint (type: tinyint), (cdouble + 1.0) (type: double) + expressions: 
ctinyint (type: tinyint), (cdouble + 1.0D) (type: double) outputColumnNames: _col0, _col1 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/timestamp.q.out b/ql/src/test/results/clientpositive/timestamp.q.out index 9d0ceef50f..8fafd12548 100644 --- a/ql/src/test/results/clientpositive/timestamp.q.out +++ b/ql/src/test/results/clientpositive/timestamp.q.out @@ -20,7 +20,7 @@ STAGE PLANS: Select Operator Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: 2011-01-01 01:01:01.0 (type: timestamp) + keys: TIMESTAMP'2011-01-01 01:01:01.0' (type: timestamp) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE @@ -40,7 +40,7 @@ STAGE PLANS: Select Operator Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: 2011-01-01 01:01:01.0 (type: timestamp) + keys: TIMESTAMP'2011-01-01 01:01:01.0' (type: timestamp) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE @@ -57,7 +57,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2011-01-01 01:01:01.0 (type: timestamp) + expressions: TIMESTAMP'2011-01-01 01:01:01.0' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -108,7 +108,7 @@ STAGE PLANS: Select Operator Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: 2011-01-01 01:01:01.123 (type: timestamp) + keys: TIMESTAMP'2011-01-01 01:01:01.123' (type: timestamp) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE @@ -128,7 +128,7 @@ STAGE PLANS: Select Operator Statistics: Num rows: 1000 Data size: 40000 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator - keys: 2011-01-01 01:01:01.123 (type: timestamp) + keys: TIMESTAMP'2011-01-01 01:01:01.123' (type: timestamp) mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE @@ -145,7 +145,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2011-01-01 01:01:01.123 (type: timestamp) + expressions: TIMESTAMP'2011-01-01 01:01:01.123' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out index bc5ceb3f30..196607807d 100644 --- a/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out +++ b/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out @@ -49,7 +49,7 @@ STAGE PLANS: predicate: ((cbigint % 250) = 0) (type: boolean) Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) 
(type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) + expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -181,7 +181,7 @@ STAGE PLANS: predicate: ((cbigint % 250) = 0) (type: boolean) Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) + expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/timestamp_literal.q.out b/ql/src/test/results/clientpositive/timestamp_literal.q.out index 4e069695f4..67750bb74e 100644 --- a/ql/src/test/results/clientpositive/timestamp_literal.q.out +++ b/ql/src/test/results/clientpositive/timestamp_literal.q.out @@ -17,7 +17,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2011-01-01 01:01:01.0 (type: timestamp) + expressions: TIMESTAMP'2011-01-01 01:01:01.0' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/timestamptz.q.out b/ql/src/test/results/clientpositive/timestamptz.q.out index 7cff9a5c23..0b5c05c006 100644 --- a/ql/src/test/results/clientpositive/timestamptz.q.out +++ b/ql/src/test/results/clientpositive/timestamptz.q.out @@ -15,7 +15,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 
2005-01-02 18:01:00.0 US/Pacific (type: timestamp with local time zone) + expressions: TIMESTAMPLOCALTZ'2005-01-02 18:01:00.0 US/Pacific' (type: timestamp with local time zone) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -46,7 +46,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2016-01-03 12:26:34.0123 US/Pacific (type: timestamp with local time zone) + expressions: TIMESTAMPLOCALTZ'2016-01-03 12:26:34.0123 US/Pacific' (type: timestamp with local time zone) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -77,7 +77,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2016-01-02 16:00:00.0 US/Pacific (type: timestamp with local time zone) + expressions: TIMESTAMPLOCALTZ'2016-01-02 16:00:00.0 US/Pacific' (type: timestamp with local time zone) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -108,7 +108,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2016-01-03 04:34:56.38 US/Pacific (type: timestamp with local time zone) + expressions: TIMESTAMPLOCALTZ'2016-01-03 04:34:56.38 US/Pacific' (type: timestamp with local time zone) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/type_widening.q.out b/ql/src/test/results/clientpositive/type_widening.q.out index 30e556477e..3c0feb8458 100644 --- a/ql/src/test/results/clientpositive/type_widening.q.out +++ b/ql/src/test/results/clientpositive/type_widening.q.out @@ -14,7 +14,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 0 (type: bigint) + expressions: 0L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -47,7 +47,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 0 (type: bigint) + expressions: 0L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE Union @@ -60,7 +60,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 9223372036854775807 (type: bigint) + expressions: 9223372036854775807L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE Union @@ -1120,7 +1120,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (a > 2) (type: boolean) + predicate: (a > 2Y) (type: boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: a (type: tinyint), b (type: smallint) @@ -1144,7 +1144,7 @@ STAGE PLANS: alias: t1 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Filter Operator - predicate: (b < 2) (type: boolean) + predicate: (b < 2S) (type: 
boolean) Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: a (type: tinyint), b (type: smallint) diff --git a/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out b/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out index bf780037b8..54cb131d15 100644 --- a/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out +++ b/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out @@ -512,7 +512,7 @@ STAGE PLANS: alias: bucket Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CASE WHEN ((key < 100.0)) THEN (NaN) ELSE (key) END (type: double) + expressions: CASE WHEN ((key < 100.0D)) THEN (NaND) ELSE (key) END (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 58120 Basic stats: COMPLETE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/udf1.q.out b/ql/src/test/results/clientpositive/udf1.q.out index 27dbfa47d9..885de90288 100644 --- a/ql/src/test/results/clientpositive/udf1.q.out +++ b/ql/src/test/results/clientpositive/udf1.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 'TRUE' (type: string), 'FALSE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'FALSE' (type: string), 'FALSE' (type: string), 'FALSE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'FALSE' (type: string), 'TRUE' (type: string), 'TRUE' (type: string), 'acc' (type: string), 'abc' (type: string), 'abb' (type: string), 'hive' (type: string), 'hadoop' (type: string), 'AaAbAcA' (type: string), 'FALSE' (type: string) diff --git a/ql/src/test/results/clientpositive/udf4.q.out b/ql/src/test/results/clientpositive/udf4.q.out index 5d263ebf68..1ffc265be3 100644 --- a/ql/src/test/results/clientpositive/udf4.q.out +++ b/ql/src/test/results/clientpositive/udf4.q.out @@ -77,7 +77,7 @@ STAGE PLANS: alias: dest1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1 (type: decimal(1,0)), 2 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1 (type: decimal(2,0)), 1 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1.0 (type: double), null (type: double), 0.0 (type: double), 1 (type: decimal(2,0)), 2 (type: decimal(2,0)), -1 (type: decimal(2,0)), 1 (type: decimal(2,0)), rand(3) (type: double), 3 (type: int), -3 (type: int), 3 (type: int), -1 (type: int), -2 (type: int), -2 (type: tinyint), -2 (type: smallint), -2 (type: bigint), 0 (type: tinyint), 0 (type: smallint), 0 (type: int), 0 (type: bigint), 3 (type: tinyint), 3 (type: smallint), 3 (type: int), 3 (type: bigint), 2 (type: tinyint), 2 (type: smallint), 2 (type: int), 2 (type: bigint) + expressions: 1 (type: decimal(1,0)), 2 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1 (type: decimal(2,0)), 1 (type: decimal(2,0)), -2 (type: decimal(2,0)), 1.0D (type: double), null (type: double), 0.0D (type: double), 1 (type: decimal(2,0)), 2 (type: decimal(2,0)), -1 (type: decimal(2,0)), 1 (type: decimal(2,0)), rand(3) (type: double), 3 (type: int), -3 (type: int), 3 (type: int), -1 (type: int), -2 (type: int), -2Y (type: tinyint), -2S (type: smallint), -2L 
(type: bigint), 0Y (type: tinyint), 0S (type: smallint), 0 (type: int), 0L (type: bigint), 3Y (type: tinyint), 3S (type: smallint), 3 (type: int), 3L (type: bigint), 2Y (type: tinyint), 2S (type: smallint), 2 (type: int), 2L (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33 Statistics: Num rows: 1 Data size: 1248 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/udf5.q.out b/ql/src/test/results/clientpositive/udf5.q.out index 3fa801a078..d96620aeb1 100644 --- a/ql/src/test/results/clientpositive/udf5.q.out +++ b/ql/src/test/results/clientpositive/udf5.q.out @@ -33,7 +33,7 @@ STAGE PLANS: alias: dest1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: '2008-11-11 15:32:20' (type: string), 2008-11-11 (type: date), 1 (type: int), 11 (type: int), 2008 (type: int), 1 (type: int), 11 (type: int), 2008 (type: int) + expressions: '2008-11-11 15:32:20' (type: string), DATE'2008-11-11' (type: date), 1 (type: int), 11 (type: int), 2008 (type: int), 1 (type: int), 11 (type: int), 2008 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 183 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf6.q.out b/ql/src/test/results/clientpositive/udf6.q.out index 4193d340e4..69e0130dd3 100644 --- a/ql/src/test/results/clientpositive/udf6.q.out +++ b/ql/src/test/results/clientpositive/udf6.q.out @@ -75,7 +75,7 @@ STAGE PLANS: alias: dest1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1 (type: int), 2 (type: int), 2 (type: int), 'a' (type: string), 0.1 (type: decimal(1,1)), 2 (type: bigint), 126 (type: tinyint), 128 (type: smallint), 128 (type: int), 1.0 (type: double), '128' (type: string) + expressions: 1 (type: int), 2 (type: int), 2 (type: int), 'a' (type: string), 0.1 (type: decimal(1,1)), 2L (type: bigint), 126Y (type: tinyint), 128S (type: smallint), 128 (type: int), 1.0D (type: double), '128' (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Statistics: Num rows: 1 Data size: 324 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf7.q.out b/ql/src/test/results/clientpositive/udf7.q.out index 12ebb2db64..909556947a 100644 --- a/ql/src/test/results/clientpositive/udf7.q.out +++ b/ql/src/test/results/clientpositive/udf7.q.out @@ -47,7 +47,7 @@ STAGE PLANS: alias: dest1 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1.098612288668 (type: double), null (type: double), null (type: double), 1.098612288668 (type: double), null (type: double), null (type: double), 1.584962500721 (type: double), null (type: double), null (type: double), 0.47712125472 (type: double), null (type: double), null (type: double), 1.584962500721 (type: double), null (type: double), null (type: double), null (type: double), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), 
1.0 (type: double), 8.0 (type: double), 8.0 (type: double) + expressions: 1.098612288668D (type: double), null (type: double), null (type: double), 1.098612288668D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), 0.47712125472D (type: double), null (type: double), null (type: double), 1.584962500721D (type: double), null (type: double), null (type: double), null (type: double), -1.0D (type: double), 7.389056098931D (type: double), 8.0D (type: double), 8.0D (type: double), 0.125D (type: double), 8.0D (type: double), 2.0D (type: double), NaND (type: double), 1.0D (type: double), 1.0D (type: double), 8.0D (type: double), 8.0D (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf9.q.out b/ql/src/test/results/clientpositive/udf9.q.out index a9bd954bfc..a247a8ecd2 100644 --- a/ql/src/test/results/clientpositive/udf9.q.out +++ b/ql/src/test/results/clientpositive/udf9.q.out @@ -32,10 +32,10 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: -1 (type: int), 2 (type: int), 32 (type: int), -1 (type: int), 2009-01-01 (type: date), 2009-12-31 (type: date), 2008-03-01 (type: date), 2009-03-02 (type: date), 2008-02-28 (type: date), 2009-02-27 (type: date), 2008-12-31 (type: date), 2008-01-02 (type: date), 2008-02-26 (type: date), 2009-02-26 (type: date), 2006-02-28 (type: date), 2005-02-28 (type: date) + expressions: -1 (type: int), 2 (type: int), 32 (type: int), -1 (type: int), DATE'2009-01-01' (type: date), DATE'2009-12-31' (type: date), DATE'2008-03-01' (type: date), DATE'2009-03-02' (type: date), DATE'2008-02-28' (type: date), DATE'2009-02-27' (type: date), DATE'2008-12-31' (type: date), DATE'2008-01-02' (type: date), DATE'2008-02-26' (type: date), DATE'2009-02-26' (type: date), DATE'2006-02-28' (type: date), DATE'2005-02-28' (type: date) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/udf_10_trims.q.out b/ql/src/test/results/clientpositive/udf_10_trims.q.out index 4f08014ee2..5a53eb1aca 100644 --- a/ql/src/test/results/clientpositive/udf_10_trims.q.out +++ b/ql/src/test/results/clientpositive/udf_10_trims.q.out @@ -36,7 +36,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 'abc' (type: string) diff --git a/ql/src/test/results/clientpositive/udf_E.q.out b/ql/src/test/results/clientpositive/udf_E.q.out index 4ba0e1f11d..fe6cd9d7d3 100644 --- 
a/ql/src/test/results/clientpositive/udf_E.q.out +++ b/ql/src/test/results/clientpositive/udf_E.q.out @@ -17,7 +17,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2.718281828459045 (type: double) + expressions: 2.718281828459045D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -66,7 +66,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2.718281828459045 (type: double) + expressions: 2.718281828459045D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_PI.q.out b/ql/src/test/results/clientpositive/udf_PI.q.out index 0479eef4e7..df911dfc80 100644 --- a/ql/src/test/results/clientpositive/udf_PI.q.out +++ b/ql/src/test/results/clientpositive/udf_PI.q.out @@ -17,7 +17,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 3.141592653589793 (type: double) + expressions: 3.141592653589793D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -66,7 +66,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 3.141592653589793 (type: double) + expressions: 3.141592653589793D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_abs.q.out b/ql/src/test/results/clientpositive/udf_abs.q.out index 5a8c4d7cfd..dcd4b36acd 100644 --- a/ql/src/test/results/clientpositive/udf_abs.q.out +++ b/ql/src/test/results/clientpositive/udf_abs.q.out @@ -44,7 +44,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 0 (type: int), 1 (type: int), 123 (type: int), 9223372036854775807 (type: bigint), 9223372036854775807 (type: bigint) + expressions: 0 (type: int), 1 (type: int), 123 (type: int), 9223372036854775807L (type: bigint), 9223372036854775807L (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 500 Data size: 14000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_between.q.out b/ql/src/test/results/clientpositive/udf_between.q.out index 8070735502..9262d42cf9 100644 --- a/ql/src/test/results/clientpositive/udf_between.q.out +++ b/ql/src/test/results/clientpositive/udf_between.q.out @@ -26,7 +26,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) + 100.0) BETWEEN 100.0 AND 200.0 (type: boolean) + predicate: (UDFToDouble(key) + 100.0D) BETWEEN 100.0D AND 200.0D (type: boolean) Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -81,7 +81,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter 
Operator - predicate: (not (UDFToDouble(key) + 100.0) BETWEEN 100.0 AND 200.0) (type: boolean) + predicate: (not (UDFToDouble(key) + 100.0D) BETWEEN 100.0D AND 200.0D) (type: boolean) Statistics: Num rows: 445 Data size: 4727 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/udf_cbrt.q.out b/ql/src/test/results/clientpositive/udf_cbrt.q.out index 4c30ad4b17..f5a19e6aca 100644 --- a/ql/src/test/results/clientpositive/udf_cbrt.q.out +++ b/ql/src/test/results/clientpositive/udf_cbrt.q.out @@ -30,7 +30,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 3.0 (type: double) + expressions: 3.0D (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_crc32.q.out b/ql/src/test/results/clientpositive/udf_crc32.q.out index 5d2888ac0a..43be1948e5 100644 --- a/ql/src/test/results/clientpositive/udf_crc32.q.out +++ b/ql/src/test/results/clientpositive/udf_crc32.q.out @@ -32,7 +32,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2743272264 (type: bigint) + expressions: 2743272264L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_degrees.q.out b/ql/src/test/results/clientpositive/udf_degrees.q.out index 867663bb6a..8490c442eb 100644 --- a/ql/src/test/results/clientpositive/udf_degrees.q.out +++ b/ql/src/test/results/clientpositive/udf_degrees.q.out @@ -17,7 +17,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 180.0 (type: double) + expressions: 180.0D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -66,7 +66,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 180.0 (type: double) + expressions: 180.0D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_example_add.q.out b/ql/src/test/results/clientpositive/udf_example_add.q.out index 7916679e5e..b092383804 100644 --- a/ql/src/test/results/clientpositive/udf_example_add.q.out +++ b/ql/src/test/results/clientpositive/udf_example_add.q.out @@ -36,7 +36,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double) + expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003D (type: double), 6.6D (type: double), 11.0D (type: double), 10.4D (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/udf_factorial.q.out 
b/ql/src/test/results/clientpositive/udf_factorial.q.out index d6757aa194..4700c6989b 100644 --- a/ql/src/test/results/clientpositive/udf_factorial.q.out +++ b/ql/src/test/results/clientpositive/udf_factorial.q.out @@ -31,7 +31,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 120 (type: bigint) + expressions: 120L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_folder_constants.q.out b/ql/src/test/results/clientpositive/udf_folder_constants.q.out index 70a8c0197c..33070c7cc9 100644 --- a/ql/src/test/results/clientpositive/udf_folder_constants.q.out +++ b/ql/src/test/results/clientpositive/udf_folder_constants.q.out @@ -94,7 +94,7 @@ STAGE PLANS: 1 _col0 (type: int) Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 978336000 (type: bigint) + expressions: 978336000L (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out b/ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out index f00e3a1108..d650e390a1 100644 --- a/ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out +++ b/ql/src/test/results/clientpositive/udf_from_utc_timestamp.q.out @@ -27,7 +27,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2012-02-11 02:30:00.0 (type: timestamp) + expressions: TIMESTAMP'2012-02-11 02:30:00.0' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_hour.q.out b/ql/src/test/results/clientpositive/udf_hour.q.out index d37c079a47..d26d71f323 100644 --- a/ql/src/test/results/clientpositive/udf_hour.q.out +++ b/ql/src/test/results/clientpositive/udf_hour.q.out @@ -38,7 +38,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 13 (type: int), 13 (type: int), null (type: int) diff --git a/ql/src/test/results/clientpositive/udf_if.q.out b/ql/src/test/results/clientpositive/udf_if.q.out index eac4fe3e2b..c46a7302ec 100644 --- a/ql/src/test/results/clientpositive/udf_if.q.out +++ b/ql/src/test/results/clientpositive/udf_if.q.out @@ -94,7 +94,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 128 (type: smallint), 1.1 (type: decimal(11,1)), 'ABC' (type: string), '12.3' (type: string) + expressions: 128S (type: smallint), 1.1 (type: decimal(11,1)), 'ABC' (type: string), '12.3' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 145500 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_java_method.q.out b/ql/src/test/results/clientpositive/udf_java_method.q.out index f2f146d525..6a6211180b 100644 --- 
a/ql/src/test/results/clientpositive/udf_java_method.q.out +++ b/ql/src/test/results/clientpositive/udf_java_method.q.out @@ -47,7 +47,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE GatherStats: false Select Operator - expressions: reflect('java.lang.String','valueOf',1) (type: string), reflect('java.lang.String','isEmpty') (type: string), reflect('java.lang.Math','max',2,3) (type: string), reflect('java.lang.Math','min',2,3) (type: string), reflect('java.lang.Math','round',2.5) (type: string), round(reflect('java.lang.Math','exp',1.0), 6) (type: double), reflect('java.lang.Math','floor',1.9) (type: string) + expressions: reflect('java.lang.String','valueOf',1) (type: string), reflect('java.lang.String','isEmpty') (type: string), reflect('java.lang.Math','max',2,3) (type: string), reflect('java.lang.Math','min',2,3) (type: string), reflect('java.lang.Math','round',2.5D) (type: string), round(reflect('java.lang.Math','exp',1.0D), 6) (type: double), reflect('java.lang.Math','floor',1.9D) (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 500 Data size: 556000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_like.q.out b/ql/src/test/results/clientpositive/udf_like.q.out index 64c17bd805..c248e545f9 100644 --- a/ql/src/test/results/clientpositive/udf_like.q.out +++ b/ql/src/test/results/clientpositive/udf_like.q.out @@ -37,7 +37,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: true (type: boolean), false (type: boolean), true (type: boolean), true (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), true (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), false (type: boolean), true (type: boolean) diff --git a/ql/src/test/results/clientpositive/udf_lower.q.out b/ql/src/test/results/clientpositive/udf_lower.q.out index e816594070..3d995d1c9b 100644 --- a/ql/src/test/results/clientpositive/udf_lower.q.out +++ b/ql/src/test/results/clientpositive/udf_lower.q.out @@ -32,7 +32,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 'abc 123' (type: string), 'ABC 123' (type: string) diff --git a/ql/src/test/results/clientpositive/udf_minute.q.out b/ql/src/test/results/clientpositive/udf_minute.q.out index f7141d217d..13f2125466 100644 --- a/ql/src/test/results/clientpositive/udf_minute.q.out +++ b/ql/src/test/results/clientpositive/udf_minute.q.out @@ -38,7 +38,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 14 (type: int), 14 (type: int), null (type: int) diff 
--git a/ql/src/test/results/clientpositive/udf_months_between.q.out b/ql/src/test/results/clientpositive/udf_months_between.q.out index 2335f0bd5b..d78d9b375c 100644 --- a/ql/src/test/results/clientpositive/udf_months_between.q.out +++ b/ql/src/test/results/clientpositive/udf_months_between.q.out @@ -32,7 +32,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1.03225806 (type: double) + expressions: 1.03225806D (type: double) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_parse_url.q.out b/ql/src/test/results/clientpositive/udf_parse_url.q.out index 0ea858493a..dc4341cda2 100644 --- a/ql/src/test/results/clientpositive/udf_parse_url.q.out +++ b/ql/src/test/results/clientpositive/udf_parse_url.q.out @@ -59,7 +59,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 'facebook.com' (type: string), '/path1/p.php' (type: string), 'k1=v1&k2=v2' (type: string), 'Ref1' (type: string), 'v2' (type: string), 'v1' (type: string), null (type: string), '/path1/p.php?k1=v1&k2=v2' (type: string), 'http' (type: string), null (type: string), 'facebook.com' (type: string) diff --git a/ql/src/test/results/clientpositive/udf_radians.q.out b/ql/src/test/results/clientpositive/udf_radians.q.out index 14bd78c598..0ce8281110 100644 --- a/ql/src/test/results/clientpositive/udf_radians.q.out +++ b/ql/src/test/results/clientpositive/udf_radians.q.out @@ -17,7 +17,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1.000000357564167 (type: double) + expressions: 1.000000357564167D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -75,7 +75,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 1.000000357564167 (type: double) + expressions: 1.000000357564167D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_reflect.q.out b/ql/src/test/results/clientpositive/udf_reflect.q.out index a9863f7eec..97e40d1509 100644 --- a/ql/src/test/results/clientpositive/udf_reflect.q.out +++ b/ql/src/test/results/clientpositive/udf_reflect.q.out @@ -49,7 +49,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: reflect('java.lang.String','valueOf',1) (type: string), reflect('java.lang.String','isEmpty') (type: string), reflect('java.lang.Math','max',2,3) (type: string), reflect('java.lang.Math','min',2,3) (type: string), reflect('java.lang.Math','round',2.5) (type: string), round(reflect('java.lang.Math','exp',1.0), 6) (type: double), reflect('java.lang.Math','floor',1.9) (type: string), reflect('java.lang.Integer','valueOf',key,16) (type: string) + expressions: 
reflect('java.lang.String','valueOf',1) (type: string), reflect('java.lang.String','isEmpty') (type: string), reflect('java.lang.Math','max',2,3) (type: string), reflect('java.lang.Math','min',2,3) (type: string), reflect('java.lang.Math','round',2.5D) (type: string), round(reflect('java.lang.Math','exp',1.0D), 6) (type: double), reflect('java.lang.Math','floor',1.9D) (type: string), reflect('java.lang.Integer','valueOf',key,16) (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink diff --git a/ql/src/test/results/clientpositive/udf_reflect2.q.out b/ql/src/test/results/clientpositive/udf_reflect2.q.out index a1074f8a81..4834cd633c 100644 --- a/ql/src/test/results/clientpositive/udf_reflect2.q.out +++ b/ql/src/test/results/clientpositive/udf_reflect2.q.out @@ -93,7 +93,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - expressions: UDFToInteger(key) (type: int), reflect2(UDFToInteger(key),'byteValue') (type: tinyint), reflect2(UDFToInteger(key),'shortValue') (type: smallint), reflect2(UDFToInteger(key),'intValue') (type: int), reflect2(UDFToInteger(key),'longValue') (type: bigint), reflect2(UDFToInteger(key),'floatValue') (type: float), reflect2(UDFToInteger(key),'doubleValue') (type: double), reflect2(UDFToInteger(key),'toString') (type: string), value (type: string), reflect2(value,'concat','_concat') (type: string), reflect2(value,'contains','86') (type: boolean), reflect2(value,'startsWith','v') (type: boolean), reflect2(value,'endsWith','6') (type: boolean), reflect2(value,'equals','val_86') (type: boolean), reflect2(value,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(value,'getBytes') (type: binary), reflect2(value,'indexOf','1') (type: int), reflect2(value,'lastIndexOf','1') (type: int), reflect2(value,'replace','val','VALUE') (type: string), reflect2(value,'substring',1) (type: string), reflect2(value,'substring',1,5) (type: string), reflect2(value,'toUpperCase') (type: string), reflect2(value,'trim') (type: string), 2013-02-15 19:41:20.0 (type: timestamp), 113 (type: int), 1 (type: int), 5 (type: int), 19 (type: int), 41 (type: int), 20 (type: int), 1360986080000 (type: bigint) + expressions: UDFToInteger(key) (type: int), reflect2(UDFToInteger(key),'byteValue') (type: tinyint), reflect2(UDFToInteger(key),'shortValue') (type: smallint), reflect2(UDFToInteger(key),'intValue') (type: int), reflect2(UDFToInteger(key),'longValue') (type: bigint), reflect2(UDFToInteger(key),'floatValue') (type: float), reflect2(UDFToInteger(key),'doubleValue') (type: double), reflect2(UDFToInteger(key),'toString') (type: string), value (type: string), reflect2(value,'concat','_concat') (type: string), reflect2(value,'contains','86') (type: boolean), reflect2(value,'startsWith','v') (type: boolean), reflect2(value,'endsWith','6') (type: boolean), reflect2(value,'equals','val_86') (type: boolean), reflect2(value,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(value,'getBytes') (type: binary), reflect2(value,'indexOf','1') (type: int), reflect2(value,'lastIndexOf','1') (type: int), reflect2(value,'replace','val','VALUE') (type: string), reflect2(value,'substring',1) (type: string), reflect2(value,'substring',1,5) (type: string), reflect2(value,'toUpperCase') (type: string), reflect2(value,'trim') (type: string), TIMESTAMP'2013-02-15 19:41:20.0' (type: timestamp), 113 (type: int), 
1 (type: int), 5 (type: int), 19 (type: int), 41 (type: int), 20 (type: int), 1360986080000L (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/udf_second.q.out b/ql/src/test/results/clientpositive/udf_second.q.out index c1f3504e9c..8042a30ab4 100644 --- a/ql/src/test/results/clientpositive/udf_second.q.out +++ b/ql/src/test/results/clientpositive/udf_second.q.out @@ -38,7 +38,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 86.0) (type: boolean) + predicate: (UDFToDouble(key) = 86.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 15 (type: int), 15 (type: int), null (type: int) diff --git a/ql/src/test/results/clientpositive/udf_sign.q.out b/ql/src/test/results/clientpositive/udf_sign.q.out index fe8440229b..35e482f348 100644 --- a/ql/src/test/results/clientpositive/udf_sign.q.out +++ b/ql/src/test/results/clientpositive/udf_sign.q.out @@ -17,7 +17,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 0.0 (type: double) + expressions: 0.0D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink @@ -83,7 +83,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 0.0 (type: double) + expressions: 0.0D (type: double) outputColumnNames: _col0 Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/udf_to_utc_timestamp.q.out b/ql/src/test/results/clientpositive/udf_to_utc_timestamp.q.out index 7bd9aaf6e1..4abf0ed27c 100644 --- a/ql/src/test/results/clientpositive/udf_to_utc_timestamp.q.out +++ b/ql/src/test/results/clientpositive/udf_to_utc_timestamp.q.out @@ -27,7 +27,7 @@ STAGE PLANS: Row Limit Per Split: 1 Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 2012-02-11 18:30:00.0 (type: timestamp) + expressions: TIMESTAMP'2012-02-11 18:30:00.0' (type: timestamp) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE ListSink diff --git a/ql/src/test/results/clientpositive/union.q.out b/ql/src/test/results/clientpositive/union.q.out index 67c9ba4854..dc9baf29c9 100644 --- a/ql/src/test/results/clientpositive/union.q.out +++ b/ql/src/test/results/clientpositive/union.q.out @@ -31,7 +31,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 100.0) (type: boolean) + predicate: (UDFToDouble(key) < 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -50,7 +50,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) > 100.0) (type: boolean) + predicate: (UDFToDouble(key) > 100.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/union20.q.out b/ql/src/test/results/clientpositive/union20.q.out index d911b907f7..860179a87c 100644 --- a/ql/src/test/results/clientpositive/union20.q.out +++ b/ql/src/test/results/clientpositive/union20.q.out @@ -77,7 +77,7 @@ STAGE PLANS: alias: s2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -104,7 +104,7 @@ STAGE PLANS: alias: s4 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/union22.q.out b/ql/src/test/results/clientpositive/union22.q.out index ce4fa62359..50d06c0cd2 100644 --- a/ql/src/test/results/clientpositive/union22.q.out +++ b/ql/src/test/results/clientpositive/union22.q.out @@ -144,7 +144,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(k0) > 50.0) and (UDFToDouble(k1) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(k0) > 50.0D) and (UDFToDouble(k1) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 1862 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k3 (type: string), k4 (type: string) @@ -170,7 +170,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k1) > 20.0) (type: boolean) + predicate: (UDFToDouble(k1) > 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 3693 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k2 (type: string), ds (type: string) @@ -323,7 +323,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k0) <= 50.0) (type: boolean) + predicate: (UDFToDouble(k0) <= 50.0D) (type: boolean) Statistics: Num rows: 166 Data size: 5622 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k2 (type: string), k3 (type: string), k4 (type: string) @@ -589,7 +589,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(k1) > 20.0) (type: boolean) + predicate: (UDFToDouble(k1) > 20.0D) (type: boolean) Statistics: Num rows: 166 Data size: 3693 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: k1 (type: string), k2 (type: string), ds (type: string) @@ -610,7 +610,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: ((UDFToDouble(k0) > 50.0) and (UDFToDouble(k1) > 20.0)) (type: boolean) + predicate: ((UDFToDouble(k0) > 50.0D) and (UDFToDouble(k1) > 20.0D)) (type: boolean) Statistics: Num rows: 55 Data size: 1862 Basic stats: COMPLETE Column stats: NONE Select Operator 
expressions: k1 (type: string), k3 (type: string), k4 (type: string) diff --git a/ql/src/test/results/clientpositive/union24.q.out b/ql/src/test/results/clientpositive/union24.q.out index 8f8c1709c5..dfa614ed68 100644 --- a/ql/src/test/results/clientpositive/union24.q.out +++ b/ql/src/test/results/clientpositive/union24.q.out @@ -83,7 +83,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -187,7 +187,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -223,7 +223,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -259,7 +259,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -577,7 +577,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -597,7 +597,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -752,7 +752,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -788,7 +788,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -1050,7 +1050,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -1070,7 +1070,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) 
< 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -1294,7 +1294,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) @@ -1330,7 +1330,7 @@ STAGE PLANS: GatherStats: false Filter Operator isSamplingPred: false - predicate: (UDFToDouble(key) < 10.0) (type: boolean) + predicate: (UDFToDouble(key) < 10.0D) (type: boolean) Statistics: Num rows: 103 Data size: 494 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), count (type: bigint) diff --git a/ql/src/test/results/clientpositive/union27.q.out b/ql/src/test/results/clientpositive/union27.q.out index 34b46f14e1..8e3dae9e57 100644 --- a/ql/src/test/results/clientpositive/union27.q.out +++ b/ql/src/test/results/clientpositive/union27.q.out @@ -50,7 +50,7 @@ STAGE PLANS: alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) @@ -65,7 +65,7 @@ STAGE PLANS: alias: dim_pho Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -83,7 +83,7 @@ STAGE PLANS: alias: jackson_sev_add Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 97.0) (type: boolean) + predicate: (UDFToDouble(key) = 97.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/union33.q.out b/ql/src/test/results/clientpositive/union33.q.out index d12db7951b..bd1c9a9cc3 100644 --- a/ql/src/test/results/clientpositive/union33.q.out +++ b/ql/src/test/results/clientpositive/union33.q.out @@ -108,7 +108,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 0.0) (type: boolean) + predicate: (UDFToDouble(key) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) @@ -391,7 +391,7 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 0.0) (type: boolean) + predicate: (UDFToDouble(key) = 0.0D) (type: boolean) Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) diff --git a/ql/src/test/results/clientpositive/union_pos_alias.q.out 
b/ql/src/test/results/clientpositive/union_pos_alias.q.out index 65e736f3ed..493316b3c5 100644 --- a/ql/src/test/results/clientpositive/union_pos_alias.q.out +++ b/ql/src/test/results/clientpositive/union_pos_alias.q.out @@ -535,7 +535,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 13 Data size: 137 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col3 = 2) (type: boolean) + predicate: (_col3 = 2L) (type: boolean) Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: bigint), _col0 (type: int), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out index f1f2dd692d..500c123532 100644 --- a/ql/src/test/results/clientpositive/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/union_remove_10.q.out @@ -67,7 +67,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -163,7 +163,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out index 702aef8aaf..29141e1b01 100644 --- a/ql/src/test/results/clientpositive/union_remove_12.q.out +++ b/ql/src/test/results/clientpositive/union_remove_12.q.out @@ -61,7 +61,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out index 702aef8aaf..29141e1b01 100644 --- a/ql/src/test/results/clientpositive/union_remove_14.q.out +++ b/ql/src/test/results/clientpositive/union_remove_14.q.out @@ -61,7 +61,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out index 8a71f96655..8f9be65d0b 100644 --- a/ql/src/test/results/clientpositive/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/union_remove_19.q.out @@ -234,7 +234,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 7.0) (type: boolean) + 
predicate: (UDFToDouble(key) = 7.0D) (type: boolean) Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -281,7 +281,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (UDFToDouble(key) = 7.0) (type: boolean) + predicate: (UDFToDouble(key) = 7.0D) (type: boolean) Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -380,7 +380,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0) (type: boolean) + predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0D) (type: boolean) Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() @@ -431,7 +431,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0) (type: boolean) + predicate: ((UDFToDouble(key) + UDFToDouble(key)) >= 7.0D) (type: boolean) Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() diff --git a/ql/src/test/results/clientpositive/union_remove_2.q.out b/ql/src/test/results/clientpositive/union_remove_2.q.out index 0b84fb0c18..cd1f243a2a 100644 --- a/ql/src/test/results/clientpositive/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/union_remove_2.q.out @@ -106,7 +106,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -125,7 +125,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out index efe59a12d2..f60b88bbd8 100644 --- a/ql/src/test/results/clientpositive/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/union_remove_5.q.out @@ -150,7 +150,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -169,7 +169,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git 
a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out index 742e5b19db..096d98db3a 100644 --- a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out @@ -305,7 +305,7 @@ STAGE PLANS: outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: (_col0 - 200) (type: bigint) + expressions: (_col0 - 200L) (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -462,7 +462,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), (_col1 * 2) (type: bigint) + expressions: _col0 (type: string), (_col1 * 2L) (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out index 2093fe6a0a..f1ae75ac32 100644 --- a/ql/src/test/results/clientpositive/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/union_remove_8.q.out @@ -106,7 +106,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 1 (type: bigint) + expressions: key (type: string), 1L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -125,7 +125,7 @@ STAGE PLANS: alias: inputtbl1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string), 2 (type: bigint) + expressions: key (type: string), 2L (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.out index d16c7e0a2e..4b06016702 100644 --- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out +++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out @@ -136,7 +136,7 @@ STAGE PLANS: projectedOutputColumnNums: [2] Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: avg(50), avg(50.0), avg(50) + aggregations: avg(50), avg(50.0D), avg(50) Group By Vectorization: aggregators: VectorUDAFAvgLong(ConstantVectorExpression(val 50) -> 12:int) -> struct, VectorUDAFAvgDouble(ConstantVectorExpression(val 50.0) -> 13:double) -> struct, VectorUDAFAvgDecimal(ConstantVectorExpression(val 50) -> 14:decimal(10,0)) -> struct className: VectorGroupByOperator diff --git a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out index ca06456cdf..48d38c316e 100644 --- a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out +++ b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out @@ -111,7 +111,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), round((UDFToDouble(_col1) / 
60.0), 2) (type: double)
+ expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -306,7 +306,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0), 2) (type: double)
+ expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vector_date_1.q.out b/ql/src/test/results/clientpositive/vector_date_1.q.out
index 93f9a7116d..050f9c3150 100644
--- a/ql/src/test/results/clientpositive/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_date_1.q.out
@@ -387,7 +387,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:dt1:date, 1:dt2:date, 2:ROW__ID:struct]
 Select Operator
- expressions: dt1 (type: date), (dt1 <> 1970-01-01) (type: boolean), (dt1 >= 1970-01-01) (type: boolean), (dt1 > 1970-01-01) (type: boolean), (dt1 <= 2100-01-01) (type: boolean), (dt1 < 2100-01-01) (type: boolean), (1970-01-01 <> dt1) (type: boolean), (1970-01-01 <= dt1) (type: boolean), (1970-01-01 < dt1) (type: boolean)
+ expressions: dt1 (type: date), (dt1 <> DATE'1970-01-01') (type: boolean), (dt1 >= DATE'1970-01-01') (type: boolean), (dt1 > DATE'1970-01-01') (type: boolean), (dt1 <= DATE'2100-01-01') (type: boolean), (dt1 < DATE'2100-01-01') (type: boolean), (DATE'1970-01-01' <> dt1) (type: boolean), (DATE'1970-01-01' <= dt1) (type: boolean), (DATE'1970-01-01' < dt1) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Select Vectorization:
 className: VectorSelectOperator
@@ -526,7 +526,7 @@ STAGE PLANS:
 native: true
 vectorizationSchemaColumns: [0:dt1:date, 1:dt2:date, 2:ROW__ID:struct]
 Select Operator
- expressions: dt1 (type: date), (dt1 = 1970-01-01) (type: boolean), (dt1 <= 1970-01-01) (type: boolean), (dt1 < 1970-01-01) (type: boolean), (dt1 >= 2100-01-01) (type: boolean), (dt1 > 2100-01-01) (type: boolean), (1970-01-01 = dt1) (type: boolean), (1970-01-01 >= dt1) (type: boolean), (1970-01-01 > dt1) (type: boolean)
+ expressions: dt1 (type: date), (dt1 = DATE'1970-01-01') (type: boolean), (dt1 <= DATE'1970-01-01') (type: boolean), (dt1 < DATE'1970-01-01') (type: boolean), (dt1 >= DATE'2100-01-01') (type: boolean), (dt1 > DATE'2100-01-01') (type: boolean), (DATE'1970-01-01' = dt1) (type: boolean), (DATE'1970-01-01' >= dt1) (type: boolean), (DATE'1970-01-01' > dt1) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Select Vectorization:
 className: VectorSelectOperator
@@ -811,10 +811,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterDateScalarEqualDateColumn(val 11323, col 0:date), FilterDateColNotEqualDateScalar(col 0:date, val 0), FilterDateScalarNotEqualDateColumn(val 0, col 0:date))
- predicate: ((1970-01-01 <> dt1) and (2001-01-01 = dt1) and (dt1 <> 1970-01-01)) (type: boolean)
+ predicate: ((DATE'1970-01-01' <> dt1) and (DATE'2001-01-01' = dt1) and (dt1 <> DATE'1970-01-01')) (type: boolean)
 Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: 2001-01-01 (type: date), dt2 (type: date)
+ expressions: DATE'2001-01-01' (type: date), dt2 (type: date)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
@@ -919,7 +919,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColumnInList(col 0:date, values [0, 11323])
- predicate: (dt1) IN (1970-01-01, 2001-01-01) (type: boolean)
+ predicate: (dt1) IN (DATE'1970-01-01', DATE'2001-01-01') (type: boolean)
 Statistics: Num rows: 2 Data size: 149 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: dt1 (type: date)
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index 2f7ce319ae..824487165d 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -127,7 +127,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
 Statistics: Num rows: 6144 Data size: 1082441 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col9 > 1) (type: boolean)
+ predicate: (_col9 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 360813 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: bigint), _col6 (type: decimal(23,14)), _col7 (type: decimal(23,14)), _col8 (type: decimal(33,14))
@@ -268,7 +268,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
 Statistics: Num rows: 6144 Data size: 1082441 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col15 > 1) (type: boolean)
+ predicate: (_col15 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 360813 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(20,10)), _col3 (type: decimal(20,10)), _col4 (type: decimal(30,10)), _col5 (type: decimal(24,14)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(23,14)), _col10 (type: decimal(23,14)), _col11 (type: decimal(33,14)), _col12 (type: decimal(27,18)), _col13 (type: double), _col14 (type: double)
@@ -443,7 +443,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
 Statistics: Num rows: 6144 Data size: 173221 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col9 > 1) (type: boolean)
+ predicate: (_col9 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 57740 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 (type: decimal(26,0))
@@ -603,7 +603,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
 Statistics: Num rows: 6144 Data size: 173221 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: (_col15 > 1) (type: boolean)
+ predicate: (_col15 > 1L) (type: boolean)
 Statistics: Num rows: 2048 Data size: 57740 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 (type: decimal(15,9)), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: decimal(16,0)), _col10 (type: decimal(16,0)), _col11 (type: decimal(26,0)), _col12 (type: decimal(20,4)), _col13 (type: double), _col14 (type: double)
diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
index 0ee65ebc31..b8581e4040 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
@@ -121,10 +121,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 7:double, val -1.0)(children: FuncSinDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(20,10)) -> 6:double) -> 7:double))
- predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean)
+ predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0D)) (type: boolean)
 Statistics: Num rows: 2048 Data size: 366928 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(20,10)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double)
+ expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(20,10)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159D)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25
 Select Vectorization:
 className: VectorSelectOperator
@@ -363,10 +363,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 7:double, val -1.0)(children: FuncSinDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 6:double) -> 7:double))
- predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0)) (type: boolean)
+ predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0D)) (type: boolean)
 Statistics: Num rows: 2048 Data size: 366865 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: cdecimal1 (type: decimal(12,4)), round(cdecimal1, 2) (type: decimal(11,2)), round(cdecimal1) (type: decimal(9,0)), floor(cdecimal1) (type: decimal(9,0)), ceil(cdecimal1) (type: decimal(9,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(12,4)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(12,4)), (- cdecimal1) (type: decimal(12,4)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double)
+ expressions: cdecimal1 (type: decimal(12,4)), round(cdecimal1, 2) (type: decimal(11,2)), round(cdecimal1) (type: decimal(9,0)), floor(cdecimal1) (type: decimal(9,0)), ceil(cdecimal1) (type: decimal(9,0)), round(exp(cdecimal1), 58) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601)) (type: double), log(2, cdecimal1) (type: double), power(log2(cdecimal1), 2) (type: double), power(log2(cdecimal1), 2) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(12,4)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: decimal(12,4)), (- cdecimal1) (type: decimal(12,4)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159D)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
index 2bcedd04b3..03e517245c 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
@@ -92,7 +92,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 19 Data size: 1983 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: null (type: double), null (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
+ expressions: null (type: double), null (type: double), 1.4711276743037347D (type: double), -0.8390715290764524D (type: double), -0.5440211108893698D (type: double), 0.6483608274590866D (type: double), 0.17453292519943295D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -197,7 +197,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 19 Data size: 1983 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+ expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
 Select Vectorization:
 className: VectorSelectOperator
@@ -308,7 +308,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: null (type: double), null (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
+ expressions: null (type: double), null (type: double), 1.4711276743037347D (type: double), -0.8390715290764524D (type: double), -0.5440211108893698D (type: double), 0.6483608274590866D (type: double), 0.17453292519943295D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -413,7 +413,7 @@ STAGE PLANS:
 predicate: (key = 10) (type: boolean)
 Statistics: Num rows: 1 Data size: 3590 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+ expressions: 22026.465794806718D (type: double), 2.302585092994046D (type: double), 2.302585092994046D (type: double), 1.0D (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0D (type: double), 3.1622776601683795D (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vector_elt.q.out b/ql/src/test/results/clientpositive/vector_elt.q.out
index 0b51d83d7b..474efb5b76 100644
--- a/ql/src/test/results/clientpositive/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/vector_elt.q.out
@@ -28,7 +28,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterLongColGreaterLongScalar(col 0:tinyint, val 0)
- predicate: (ctinyint > 0) (type: boolean)
+ predicate: (ctinyint > 0Y) (type: boolean)
 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ((UDFToInteger(ctinyint) % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((UDFToInteger(ctinyint) % 2) + 1), cstring1, cint) (type: string)
diff --git a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
index 0815bdb5ff..01c50966d2 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
@@ -199,7 +199,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 2:bigint, val 0), FilterExprAndExpr(children: SelectColumnIsNull(col 4:boolean), SelectColumnIsNotNull(col 0:string), FilterLongColGreaterEqualLongColumn(col 3:bigint, col 2:bigint)))
- predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean)
+ predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean)
 Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
@@ -301,7 +301,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col5
 Statistics: Num rows: 550 Data size: 15193 Basic stats: COMPLETE Column stats: NONE
 Filter Operator
- predicate: ((_col2 = 0) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean)
+ predicate: ((_col2 = 0L) or (_col5 is null and _col0 is not null and (_col3 >= _col2))) (type: boolean)
 Statistics: Num rows: 366 Data size: 10110 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: _col0 (type: string), _col1 (type: string)
diff --git a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
index fc8290e72e..e89b6bca19 100644
--- a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
@@ -166,7 +166,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
- keys: s_store_id (type: string), 0 (type: bigint)
+ keys: s_store_id (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
@@ -277,7 +277,7 @@ STAGE PLANS:
 native: false
 vectorProcessingMode: HASH
 projectedOutputColumnNums: []
- keys: _col0 (type: string), 0 (type: bigint)
+ keys: _col0 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
@@ -374,7 +374,7 @@ STAGE PLANS:
 outputColumnNames: _col0
 Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
 Group By Operator
- keys: _col0 (type: string), 0 (type: bigint)
+ keys: _col0 (type: string), 0L (type: bigint)
 mode: hash
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
diff --git a/ql/src/test/results/clientpositive/vector_interval_1.q.out b/ql/src/test/results/clientpositive/vector_interval_1.q.out
index 1c7df24651..8c0086e300 100644
--- a/ql/src/test/results/clientpositive/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_1.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
 enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
 Reduce Operator Tree:
 Select Operator
- expressions: KEY.reducesinkkey0 (type: string), 1-2 (type: interval_year_month), VALUE._col0 (type: interval_year_month), 1 02:03:04.000000000 (type: interval_day_time), VALUE._col1 (type: interval_day_time)
+ expressions: KEY.reducesinkkey0 (type: string), INTERVAL'1-2' (type: interval_year_month), VALUE._col0 (type: interval_year_month), INTERVAL'1 02:03:04.000000000' (type: interval_day_time), VALUE._col1 (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4
 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -195,7 +195,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
+ expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (INTERVAL'1-2' + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (INTERVAL'1-2' - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4
 Select Vectorization:
 className: VectorSelectOperator
@@ -229,7 +229,7 @@ STAGE PLANS:
 enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
 Reduce Operator Tree:
 Select Operator
- expressions: KEY.reducesinkkey0 (type: date), 2-4 (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), 0-0 (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month)
+ expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2-4' (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), INTERVAL'0-0' (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -314,7 +314,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dt (type: date), (CAST( str2 AS INTERVAL DAY TO SECOND) + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (1 02:03:04.000000000 + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (CAST( str2 AS INTERVAL DAY TO SECOND) - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (1 02:03:04.000000000 - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time)
+ expressions: dt (type: date), (CAST( str2 AS INTERVAL DAY TO SECOND) + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (INTERVAL'1 02:03:04.000000000' + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (CAST( str2 AS INTERVAL DAY TO SECOND) - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time), (INTERVAL'1 02:03:04.000000000' - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4
 Select Vectorization:
 className: VectorSelectOperator
@@ -348,7 +348,7 @@ STAGE PLANS:
 enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
 Reduce Operator Tree:
 Select Operator
- expressions: KEY.reducesinkkey0 (type: date), 2 04:06:08.000000000 (type: interval_day_time), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), 0 00:00:00.000000000 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time)
+ expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2 04:06:08.000000000' (type: interval_day_time), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), INTERVAL'0 00:00:00.000000000' (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -445,7 +445,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dt (type: date), (dt + 1-2) (type: date), (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (1-2 + dt) (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + dt) (type: date), (dt - 1-2) (type: date), (dt - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (dt + 1 02:03:04.000000000) (type: timestamp), (dt + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (1 02:03:04.000000000 + dt) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + dt) (type: timestamp), (dt - 1 02:03:04.000000000) (type: timestamp), (dt - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp)
+ expressions: dt (type: date), (dt + INTERVAL'1-2') (type: date), (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (INTERVAL'1-2' + dt) (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + dt) (type: date), (dt - INTERVAL'1-2') (type: date), (dt - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: date), (dt + INTERVAL'1 02:03:04.000000000') (type: timestamp), (dt + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (INTERVAL'1 02:03:04.000000000' + dt) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + dt) (type: timestamp), (dt - INTERVAL'1 02:03:04.000000000') (type: timestamp), (dt - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
 Select Vectorization:
 className: VectorSelectOperator
@@ -588,7 +588,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: ts (type: timestamp), (ts + 1-2) (type: timestamp), (ts + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (1-2 + ts) (type: timestamp), (CAST( str1 AS INTERVAL YEAR TO MONTH) + ts) (type: timestamp), (ts - 1-2) (type: timestamp), (ts - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (ts + 1 02:03:04.000000000) (type: timestamp), (ts + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (1 02:03:04.000000000 + ts) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + ts) (type: timestamp), (ts - 1 02:03:04.000000000) (type: timestamp), (ts - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp)
+ expressions: ts (type: timestamp), (ts + INTERVAL'1-2') (type: timestamp), (ts + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (INTERVAL'1-2' + ts) (type: timestamp), (CAST( str1 AS INTERVAL YEAR TO MONTH) + ts) (type: timestamp), (ts - INTERVAL'1-2') (type: timestamp), (ts - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: timestamp), (ts + INTERVAL'1 02:03:04.000000000') (type: timestamp), (ts + CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp), (INTERVAL'1 02:03:04.000000000' + ts) (type: timestamp), (CAST( str2 AS INTERVAL DAY TO SECOND) + ts) (type: timestamp), (ts - INTERVAL'1 02:03:04.000000000') (type: timestamp), (ts - CAST( str2 AS INTERVAL DAY TO SECOND)) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
 Select Vectorization:
 className: VectorSelectOperator
@@ -713,7 +713,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: ts (type: timestamp), (ts - ts) (type: interval_day_time), (2001-01-01 01:02:03.0 - ts) (type: interval_day_time), (ts - 2001-01-01 01:02:03.0) (type: interval_day_time)
+ expressions: ts (type: timestamp), (ts - ts) (type: interval_day_time), (TIMESTAMP'2001-01-01 01:02:03.0' - ts) (type: interval_day_time), (ts - TIMESTAMP'2001-01-01 01:02:03.0') (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
@@ -820,7 +820,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dt (type: date), (dt - dt) (type: interval_day_time), (2001-01-01 - dt) (type: interval_day_time), (dt - 2001-01-01) (type: interval_day_time)
+ expressions: dt (type: date), (dt - dt) (type: interval_day_time), (DATE'2001-01-01' - dt) (type: interval_day_time), (dt - DATE'2001-01-01') (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
@@ -933,7 +933,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dt (type: date), (ts - dt) (type: interval_day_time), (2001-01-01 01:02:03.0 - dt) (type: interval_day_time), (ts - 2001-01-01) (type: interval_day_time), (dt - ts) (type: interval_day_time), (dt - 2001-01-01 01:02:03.0) (type: interval_day_time), (2001-01-01 - ts) (type: interval_day_time)
+ expressions: dt (type: date), (ts - dt) (type: interval_day_time), (TIMESTAMP'2001-01-01 01:02:03.0' - dt) (type: interval_day_time), (ts - DATE'2001-01-01') (type: interval_day_time), (dt - ts) (type: interval_day_time), (dt - TIMESTAMP'2001-01-01 01:02:03.0') (type: interval_day_time), (DATE'2001-01-01' - ts) (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
index 3ab7467df8..1547942d58 100644
--- a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dateval (type: date), (dateval - 2-2) (type: date), (dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + -2-2) (type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date)
+ expressions: dateval (type: date), (dateval - INTERVAL'2-2') (type: date), (dateval - INTERVAL'-2-2') (type: date), (dateval + INTERVAL'2-2') (type: date), (dateval + INTERVAL'-2-2') (type: date), (INTERVAL'-2-2' + dateval) (type: date), (INTERVAL'2-2' + dateval) (type: date)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -243,7 +243,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dateval (type: date), (dateval - 1999-06-07) (type: interval_day_time), (1999-06-07 - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time)
+ expressions: dateval (type: date), (dateval - DATE'1999-06-07') (type: interval_day_time), (DATE'1999-06-07' - dateval) (type: interval_day_time), (dateval - dateval) (type: interval_day_time)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
@@ -408,7 +408,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: tsval (type: timestamp), (tsval - 2-2) (type: timestamp), (tsval - -2-2) (type: timestamp), (tsval + 2-2) (type: timestamp), (tsval + -2-2) (type: timestamp), (-2-2 + tsval) (type: timestamp), (2-2 + tsval) (type: timestamp)
+ expressions: tsval (type: timestamp), (tsval - INTERVAL'2-2') (type: timestamp), (tsval - INTERVAL'-2-2') (type: timestamp), (tsval + INTERVAL'2-2') (type: timestamp), (tsval + INTERVAL'-2-2') (type: timestamp), (INTERVAL'-2-2' + tsval) (type: timestamp), (INTERVAL'2-2' + tsval) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -571,7 +571,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
+ expressions: INTERVAL'5-5' (type: interval_year_month), INTERVAL'-1-1' (type: interval_year_month)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
@@ -676,7 +676,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: dateval (type: date), (dateval - 99 11:22:33.123456789) (type: timestamp), (dateval - -99 11:22:33.123456789) (type: timestamp), (dateval + 99 11:22:33.123456789) (type: timestamp), (dateval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + dateval) (type: timestamp), (99 11:22:33.123456789 + dateval) (type: timestamp)
+ expressions: dateval (type: date), (dateval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (dateval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + dateval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + dateval) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -1010,7 +1010,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: tsval (type: timestamp), (tsval - 99 11:22:33.123456789) (type: timestamp), (tsval - -99 11:22:33.123456789) (type: timestamp), (tsval + 99 11:22:33.123456789) (type: timestamp), (tsval + -99 11:22:33.123456789) (type: timestamp), (-99 11:22:33.123456789 + tsval) (type: timestamp), (99 11:22:33.123456789 + tsval) (type: timestamp)
+ expressions: tsval (type: timestamp), (tsval - INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval - INTERVAL'-99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'99 11:22:33.123456789') (type: timestamp), (tsval + INTERVAL'-99 11:22:33.123456789') (type: timestamp), (INTERVAL'-99 11:22:33.123456789' + tsval) (type: timestamp), (INTERVAL'99 11:22:33.123456789' + tsval) (type: timestamp)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
 Select Vectorization:
 className: VectorSelectOperator
@@ -1171,7 +1171,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: 109 20:30:40.246913578 (type: interval_day_time), 89 02:14:26.000000000 (type: interval_day_time)
+ expressions: INTERVAL'109 20:30:40.246913578' (type: interval_day_time), INTERVAL'89 02:14:26.000000000' (type: interval_day_time)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vector_nvl.q.out b/ql/src/test/results/clientpositive/vector_nvl.q.out
index 1c5cedb5de..11eacc21f7 100644
--- a/ql/src/test/results/clientpositive/vector_nvl.q.out
+++ b/ql/src/test/results/clientpositive/vector_nvl.q.out
@@ -33,7 +33,7 @@ STAGE PLANS:
 predicate: cdouble is null (type: boolean)
 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: null (type: double), 100.0 (type: double)
+ expressions: null (type: double), 100.0D (type: double)
 outputColumnNames: _col0, _col1
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out b/ql/src/test/results/clientpositive/vector_string_concat.q.out
index ff3b755d56..bede8a1bcd 100644
--- a/ql/src/test/results/clientpositive/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out
@@ -334,7 +334,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0) + 1.0)))), '-'), UDFToString(year(dt))) (type: string)
+ expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0D) + 1.0D)))), '-'), UDFToString(year(dt))) (type: string)
 outputColumnNames: _col0
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vector_struct_in.q.out b/ql/src/test/results/clientpositive/vector_struct_in.q.out
index 773908b6a6..f980286c5d 100644
--- a/ql/src/test/results/clientpositive/vector_struct_in.q.out
+++ b/ql/src/test/results/clientpositive/vector_struct_in.q.out
@@ -819,7 +819,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterStructColumnInList(structExpressions [col 0:bigint, col 1:string, col 2:double], fieldVectorColumnTypes [LONG, BYTES, DOUBLE], structColumnMap [0, 1, 2])
- predicate: (struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5)) (type: boolean)
+ predicate: (struct(my_bigint,my_string,my_double)) IN (const struct(1L,'a',1.5D), const struct(1L,'b',-0.5D), const struct(3L,'b',1.5D), const struct(1L,'d',1.5D), const struct(1L,'c',1.5D), const struct(1L,'b',2.5D), const struct(1L,'b',0.5D), const struct(5L,'b',1.5D), const struct(1L,'a',0.5D), const struct(3L,'b',1.5D)) (type: boolean)
 Statistics: Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double)
@@ -937,7 +937,7 @@ STAGE PLANS:
 TableScan Vectorization:
 native: true
 Select Operator
- expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double), (struct(my_bigint,my_string,my_double)) IN (const struct(1,'a',1.5), const struct(1,'b',-0.5), const struct(3,'b',1.5), const struct(1,'d',1.5), const struct(1,'c',1.5), const struct(1,'b',2.5), const struct(1,'b',0.5), const struct(5,'b',1.5), const struct(1,'a',0.5), const struct(3,'b',1.5)) (type: boolean)
+ expressions: my_bigint (type: bigint), my_string (type: string), my_double (type: double), (struct(my_bigint,my_string,my_double)) IN (const struct(1L,'a',1.5D), const struct(1L,'b',-0.5D), const struct(3L,'b',1.5D), const struct(1L,'d',1.5D), const struct(1L,'c',1.5D), const struct(1L,'b',2.5D), const struct(1L,'b',0.5D), const struct(5L,'b',1.5D), const struct(1L,'a',0.5D), const struct(3L,'b',1.5D)) (type: boolean)
 outputColumnNames: _col0, _col1, _col2, _col3
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vectorization_1.q.out b/ql/src/test/results/clientpositive/vectorization_1.q.out
index e72321b0a2..523058183d 100644
--- a/ql/src/test/results/clientpositive/vectorization_1.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_1.q.out
@@ -121,7 +121,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: double), (_col0 / -26.28) (type: double), _col1 (type: double), (-1.389 + _col1) (type: double), (_col1 * (-1.389 + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389 + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175 % (- (_col1 * (-1.389 + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int)
+ expressions: _col0 (type: double), (_col0 / -26.28D) (type: double), _col1 (type: double), (-1.389D + _col1) (type: double), (_col1 * (-1.389D + _col1)) (type: double), _col2 (type: tinyint), (- (_col1 * (-1.389D + _col1))) (type: double), _col3 (type: int), (CAST( _col3 AS decimal(10,0)) * 79.553) (type: decimal(16,3)), _col4 (type: double), (10.175D % (- (_col1 * (-1.389D + _col1)))) (type: double), _col5 (type: bigint), (-563 % _col3) (type: int)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_10.q.out b/ql/src/test/results/clientpositive/vectorization_10.q.out
index 325d1a74e2..02189b80b6 100644
--- a/ql/src/test/results/clientpositive/vectorization_10.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_10.q.out
@@ -69,10 +69,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterStringGroupColLessEqualStringScalar(col 7:string, val 10), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterDecimalScalarGreaterEqualDecimalColumn(val -5638.15, col 14:decimal(6,2))(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(6,2))), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 5:double, val 6981.0), FilterExprOrExpr(children: FilterDecimalColEqualDecimalScalar(col 15:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(11,4)), FilterStringColLikeStringScalar(col 6:string, pattern %a))))
- predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean)
+ predicate: (((UDFToDouble(ctinyint) > cdouble) and (-5638.15 >= CAST( ctinyint AS decimal(6,2)))) or ((cdouble > 6981.0D) and ((CAST( csmallint AS decimal(11,4)) = 9763215.5639) or (cstring1 like '%a'))) or (cstring2 <= '10')) (type: boolean)
 Statistics: Num rows: 9557 Data size: 2054789 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639 - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double)
+ expressions: cdouble (type: double), ctimestamp1 (type: timestamp), ctinyint (type: tinyint), cboolean1 (type: boolean), cstring1 (type: string), (- cdouble) (type: double), (cdouble + UDFToDouble(csmallint)) (type: double), ((cdouble + UDFToDouble(csmallint)) % 33.0D) (type: double), (- cdouble) (type: double), (UDFToDouble(ctinyint) % cdouble) (type: double), (UDFToShort(ctinyint) % csmallint) (type: smallint), (- cdouble) (type: double), (cbigint * UDFToLong((UDFToShort(ctinyint) % csmallint))) (type: bigint), (9763215.5639D - (cdouble + UDFToDouble(csmallint))) (type: double), (- (- cdouble)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vectorization_11.q.out b/ql/src/test/results/clientpositive/vectorization_11.q.out
index 027d71820f..dd7fa34d54 100644
--- a/ql/src/test/results/clientpositive/vectorization_11.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_11.q.out
@@ -54,7 +54,7 @@ STAGE PLANS:
 predicate: ((cstring2 = cstring1) or (ctimestamp1 is null and (cstring1 like '%a'))) (type: boolean)
 Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0) (type: double), (cdouble * -5638.15) (type: double)
+ expressions: cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), ctimestamp1 (type: timestamp), (-3728 * UDFToInteger(csmallint)) (type: int), (cdouble - 9763215.5639D) (type: double), (- cdouble) (type: double), ((- cdouble) + 6981.0D) (type: double), (cdouble * -5638.15D) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vectorization_12.q.out b/ql/src/test/results/clientpositive/vectorization_12.q.out
index fb0a065615..3eca12c099 100644
--- a/ql/src/test/results/clientpositive/vectorization_12.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_12.q.out
@@ -149,7 +149,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
 Statistics: Num rows: 1877 Data size: 403561 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0 * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0 * _col0) / -6432.0) (type: double), (- ((-6432.0 * _col0) / -6432.0)) (type: double), _col6 (type: double), (- (-6432.0 * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0 * _col0)) (type: double), (- (- ((-6432.0 * _col0) / -6432.0))) (type: double), (((-6432.0 * _col0) / -6432.0) + (- (-6432.0 * _col0))) (type: double), _col8 (type: double)
+ expressions: _col1 (type: bigint), _col3 (type: boolean), _col2 (type: string), _col0 (type: double), (-6432.0D * _col0) (type: double), (- _col1) (type: bigint), _col4 (type: bigint), (_col1 * _col4) (type: bigint), _col5 (type: double), ((-6432.0D * _col0) / -6432.0D) (type: double), (- ((-6432.0D * _col0) / -6432.0D)) (type: double), _col6 (type: double), (- (-6432.0D * _col0)) (type: double), (-5638.15 + CAST( _col1 AS decimal(19,0))) (type: decimal(22,2)), _col7 (type: bigint), (_col6 / (-6432.0D * _col0)) (type: double), (- (- ((-6432.0D * _col0) / -6432.0D))) (type: double), (((-6432.0D * _col0) / -6432.0D) + (- (-6432.0D * _col0))) (type: double), _col8 (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col17, _col18, _col19
 Statistics: Num rows: 1877 Data size: 403561 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index caad57a4bd..ee1d65e8bf 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 11.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 12.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4))))
- predicate: (((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean)
+ predicate: (((UDFToDouble(ctimestamp1) > 11.0D) and (UDFToDouble(ctimestamp2) <> 12.0D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean)
 Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
@@ -151,7 +151,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+ expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -417,7 +417,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessDoubleScalar(col 4:float, val 3569.0), FilterDoubleScalarGreaterEqualDoubleColumn(val 10.175, col 5:double), FilterLongColNotEqualLongScalar(col 10:boolean, val 1)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -1.388)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val -1.3359999999999999)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDecimalColLessDecimalScalar(col 14:decimal(11,4), val 9763215.5639)(children: CastLongToDecimal(col 0:tinyint) -> 14:decimal(11,4))))
- predicate: (((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175 >= cdouble) and (cboolean1 <> 1))) (type: boolean)
+ predicate: (((UDFToDouble(ctimestamp1) > -1.388D) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (10.175D >= cdouble) and (cboolean1 <> 1))) (type: boolean)
 Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
@@ -474,7 +474,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+ expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_14.q.out b/ql/src/test/results/clientpositive/vectorization_14.q.out
index 0a0c5fba8b..dd28197fa8 100644
--- a/ql/src/test/results/clientpositive/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_14.q.out
@@ -88,10 +88,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColLessEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 2:int) -> 13:double), FilterTimestampColLessTimestampColumn(col 9:timestamp, col 8:timestamp)), FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -257), FilterDoubleColLessDoubleColumn(col 4:float, col 13:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float)))
- predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean)
+ predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257L) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean)
 Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double)
+ expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Select Vectorization:
 className: VectorSelectOperator
@@ -152,7 +152,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
 Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28 + _col2) (type: double), (- (-26.28 + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28 + _col2)) / 10.175) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28 + _col2)) / 10.175)) (type: double), (-1.389 % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double)
+ expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175D) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
 Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out
index 3e3cebab6d..9101f9bf54 100644
--- a/ql/src/test/results/clientpositive/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %ss%), FilterStringColLikeStringScalar(col 6:string, pattern 10%), FilterExprAndExpr(children: FilterLongColGreaterEqualLongScalar(col 2:int, val -75), FilterLongColEqualLongColumn(col 0:smallint, col 1:smallint)(children: col 0:tinyint), FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -3728.0)))
- predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean)
+ predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0D)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean)
 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
@@ -146,7 +146,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double)
+ expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_16.q.out b/ql/src/test/results/clientpositive/vectorization_16.q.out
index 69f59829e3..b33c0f7a6c 100644
--- a/ql/src/test/results/clientpositive/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_16.q.out
@@ -61,7 +61,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a)))
- predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean)
+ predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean)
 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
@@ -124,7 +124,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double)
+ expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_17.q.out b/ql/src/test/results/clientpositive/vectorization_17.q.out
index 7104ae50ee..4f0b9ac499 100644
--- a/ql/src/test/results/clientpositive/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_17.q.out
@@ -69,10 +69,10 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprAndExpr(children: FilterLongColGreaterLongScalar(col 3:bigint, val -23), FilterExprOrExpr(children: FilterDoubleColNotEqualDoubleScalar(col 5:double, val 988888.0), FilterDecimalColGreaterDecimalScalar(col 13:decimal(13,3), val -863.257)(children: CastLongToDecimal(col 2:int) -> 13:decimal(13,3))), FilterExprOrExpr(children: FilterLongColGreaterEqualLongScalar(col 0:tinyint, val 33), FilterLongColGreaterEqualLongColumn(col 1:bigint, col 3:bigint)(children: col 1:smallint), FilterDoubleColEqualDoubleColumn(col 4:double, col 5:double)(children: col 4:float)))
- predicate: (((cdouble <> 988888.0) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23)) (type: boolean)
+ predicate: (((cdouble <> 988888.0D) or (CAST( cint AS decimal(13,3)) > -863.257)) and ((ctinyint >= 33Y) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)) and (cbigint > -23L)) (type: boolean)
 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58 + (- (- cdouble))) (type: double)
+ expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % CAST( cbigint AS decimal(19,0))) (type: decimal(11,4)), (2563.58D + (- (- cdouble))) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
 Select Vectorization:
 className: VectorSelectOperator
diff --git a/ql/src/test/results/clientpositive/vectorization_2.q.out b/ql/src/test/results/clientpositive/vectorization_2.q.out
index 6c3b27771c..78e24b10f4 100644
--- a/ql/src/test/results/clientpositive/vectorization_2.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_2.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterTimestampColLessTimestampColumn(col 8:timestamp, col 9:timestamp), FilterStringColLikeStringScalar(col 7:string, pattern b%), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -5638.14990234375)), FilterExprAndExpr(children: FilterDoubleColLessDoubleColumn(col 5:double, col 13:double)(children: CastLongToDouble(col 0:tinyint) -> 13:double), FilterExprOrExpr(children: FilterDoubleScalarNotEqualDoubleColumn(val -10669.0, col 13:double)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterLongScalarGreaterLongColumn(val 359, col 2:int))))
- predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0 <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean)
+ predicate: (((cdouble < UDFToDouble(ctinyint)) and ((-10669.0D <> UDFToDouble(ctimestamp2)) or (359 > cint))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean)
 Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ctinyint (type: tinyint), csmallint (type: smallint), cbigint (type: bigint), cfloat (type: float), cdouble (type: double)
@@ -125,7 +125,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: double), (_col0 % -563.0) (type: double), (_col0 + 762.0) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double)
+ expressions: _col0 (type: double), (_col0 % -563.0D) (type: double), (_col0 + 762.0D) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0D) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
 Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_3.q.out b/ql/src/test/results/clientpositive/vectorization_3.q.out
index 0a0a3b9c80..4cd206fbb8 100644
--- a/ql/src/test/results/clientpositive/vectorization_3.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_3.q.out
@@ -72,7 +72,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: FilterDoubleColLessEqualDoubleColumn(col 13:float, col 4:float)(children: CastLongToFloatViaLongToDouble(col 2:int) -> 13:float), FilterDecimalScalarNotEqualDecimalColumn(val 79.553, col 14:decimal(22,3))(children: CastLongToDecimal(col 3:bigint) -> 14:decimal(22,3)), FilterDoubleColEqualDoubleScalar(col 13:double, val -29071.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleColumn(col 13:double, col 5:double)(children: CastLongToDouble(col 3:bigint) -> 13:double), FilterDecimalScalarLessEqualDecimalColumn(val 79.553, col 15:decimal(8,3))(children: CastLongToDecimal(col 1:smallint) -> 15:decimal(8,3)), FilterTimestampColGreaterTimestampColumn(col 8:timestamp, col 9:timestamp)))
- predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0))) (type: boolean)
+ predicate: (((UDFToDouble(cbigint) > cdouble) and (79.553 <= CAST( csmallint AS decimal(8,3))) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (79.553 <> CAST( cbigint AS decimal(22,3))) and (UDFToDouble(ctimestamp2) = -29071.0D))) (type: boolean)
 Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
 Select Operator
 expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float)
@@ -130,7 +130,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
 Select Operator
- expressions: _col0 (type: double), (_col0 - 10.175) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175)) (type: double), (- _col1) (type: double), (_col0 % 79.553) (type: double), (- (_col0 * (_col0 - 10.175))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175))) / (_col0 - 10.175)) (type: double), (- (_col0 - 10.175)) (type: double), _col4 (type: double), (-3728.0 - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double)
+ expressions: _col0 (type: double), (_col0 - 10.175D) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175D)) (type: double), (- _col1) (type: double), (_col0 % 79.553D) (type: double), (- (_col0 * (_col0 - 10.175D))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175D))) / (_col0 - 10.175D)) (type: double), (- (_col0 - 10.175D)) (type: double), _col4 (type: double), (-3728.0D - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
 Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
diff --git a/ql/src/test/results/clientpositive/vectorization_4.q.out b/ql/src/test/results/clientpositive/vectorization_4.q.out
index ee618691b5..dcc5f2ce71 100644
--- a/ql/src/test/results/clientpositive/vectorization_4.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_4.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
 className: VectorFilterOperator
 native: true
 predicateExpression: FilterExprOrExpr(children: FilterLongColGreaterEqualLongColumn(col 1:int, col 2:int)(children: col 1:smallint), FilterExprAndExpr(children: FilterLongScalarGreaterEqualLongColumn(val -89010, col 0:int)(children: col 0:tinyint), FilterDoubleColGreaterDoubleScalar(col 5:double, val 79.553)), FilterExprAndExpr(children: FilterLongScalarNotEqualLongColumn(val -563, col 3:bigint), FilterExprOrExpr(children:
FilterLongColNotEqualLongColumn(col 0:bigint, col 3:bigint)(children: col 0:tinyint), FilterDoubleScalarGreaterEqualDoubleColumn(val -3728.0, col 5:double)))) - predicate: (((-563 <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0 >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) + predicate: (((-563L <> cbigint) and ((UDFToLong(ctinyint) <> cbigint) or (-3728.0D >= cdouble))) or ((-89010 >= UDFToInteger(ctinyint)) and (cdouble > 79.553D)) or (UDFToInteger(csmallint) >= cint)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cint (type: int), cdouble (type: double) @@ -125,7 +125,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: bigint), (_col0 * -563) (type: bigint), (-3728 + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2)) (type: double), ((-3728 + _col0) - (_col0 * -563)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563) % _col0)) / _col2))) (type: double) + expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2)) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/vectorization_5.q.out b/ql/src/test/results/clientpositive/vectorization_5.q.out index 13fa89b9a9..fc6d69b4f1 100644 --- a/ql/src/test/results/clientpositive/vectorization_5.q.out +++ b/ql/src/test/results/clientpositive/vectorization_5.q.out @@ -119,7 +119,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0 % (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) + expressions: _col0 (type: smallint), (UDFToInteger(_col0) * -75) (type: int), _col1 (type: bigint), (UDFToDouble((UDFToInteger(_col0) * -75)) / UDFToDouble(_col1)) (type: double), (6981 * UDFToInteger(_col0)) (type: int), _col2 (type: smallint), (- _col2) (type: smallint), (197.0D % (UDFToDouble((UDFToInteger(_col0) 
* -75)) / UDFToDouble(_col1))) (type: double), _col3 (type: bigint), _col4 (type: tinyint), (- _col4) (type: tinyint), ((- _col4) + _col4) (type: tinyint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/vectorization_6.q.out b/ql/src/test/results/clientpositive/vectorization_6.q.out index f2204fc636..c29a37765b 100644 --- a/ql/src/test/results/clientpositive/vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/vectorization_6.q.out @@ -63,10 +63,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterExprAndExpr(children: FilterLongColLessEqualLongScalar(col 10:boolean, val 0), FilterLongColGreaterEqualLongColumn(col 11:boolean, col 10:boolean)), FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:bigint), FilterExprOrExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %a), FilterDoubleColLessEqualDoubleScalar(col 4:float, val -257.0))))) - predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0)) (type: boolean) + predicate: ((((cboolean1 <= 0) and (cboolean2 >= cboolean1)) or (cbigint is not null and ((cstring2 like '%a') or (cfloat <= -257)))) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 11605 Data size: 2495116 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28 / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) + expressions: cboolean1 (type: boolean), cfloat (type: float), cstring1 (type: string), (988888 * UDFToInteger(csmallint)) (type: int), (- csmallint) (type: smallint), (- cfloat) (type: float), (-26.28D / UDFToDouble(cfloat)) (type: double), (cfloat * 359.0) (type: float), (cint % UDFToInteger(ctinyint)) (type: int), (- cdouble) (type: double), (UDFToInteger(ctinyint) - -75) (type: int), (762 * (cint % UDFToInteger(ctinyint))) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out index 51d2b45a20..fcf7eec4f5 100644 --- a/ql/src/test/results/clientpositive/vectorization_7.q.out +++ b/ql/src/test/results/clientpositive/vectorization_7.q.out @@ -75,7 +75,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 
988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val -15.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 1174134 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) @@ -295,7 +295,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColNotEqualLongScalar(col 0:tinyint, val 0), FilterExprOrExpr(children: FilterDoubleColLessEqualDoubleScalar(col 13:double, val 0.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterLongColEqualLongColumn(col 0:int, col 2:int)(children: col 0:tinyint), FilterStringColLikeStringScalar(col 7:string, pattern ss)), FilterExprOrExpr(children: FilterDoubleScalarLessDoubleColumn(val 988888.0, col 5:double), FilterExprAndExpr(children: FilterDoubleColGreaterDoubleScalar(col 13:double, val 7.6850000000000005)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double), FilterDoubleScalarGreaterEqualDoubleColumn(val 3569.0, col 5:double)))) - predicate: (((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0)) (type: boolean) + predicate: (((988888.0D < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005D) and (3569.0D >= cdouble))) and ((UDFToDouble(ctimestamp1) <= 0.0D) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and (ctinyint <> 0Y)) (type: boolean) Statistics: Num rows: 5461 Data size: 1174134 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint) diff --git a/ql/src/test/results/clientpositive/vectorization_8.q.out b/ql/src/test/results/clientpositive/vectorization_8.q.out index 1da562389d..da4c380da5 100644 --- a/ql/src/test/results/clientpositive/vectorization_8.q.out +++ 
b/ql/src/test/results/clientpositive/vectorization_8.q.out @@ -71,10 +71,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 10.0)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 16.0)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0D) and (UDFToDouble(ctimestamp2) <> 16.0D))) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator @@ -278,10 +278,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterExprAndExpr(children: SelectColumnIsNotNull(col 7:string), FilterDoubleColLessEqualDoubleScalar(col 13:double, val 12.503)(children: CastTimestampToDouble(col 8:timestamp) -> 13:double), FilterDoubleColNotEqualDoubleScalar(col 13:double, val 11.998)(children: CastTimestampToDouble(col 9:timestamp) -> 13:double)), FilterDoubleColLessDoubleScalar(col 4:float, val -6432.0), FilterExprAndExpr(children: SelectColumnIsNotNull(col 10:boolean), FilterDoubleColEqualDoubleScalar(col 5:double, val 988888.0))) - predicate: ((cboolean1 is not null and (cdouble = 988888.0)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) (type: boolean) + predicate: ((cboolean1 is not null and (cdouble = 988888.0D)) or (cfloat < -6432) or (cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503D) and (UDFToDouble(ctimestamp2) <> 11.998D))) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select 
Operator - expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) + expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15D - cdouble) (type: double), (cdouble * -257.0D) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - cfloat) (type: float), (- cfloat) (type: float), ((-5638.15D - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorization_9.q.out b/ql/src/test/results/clientpositive/vectorization_9.q.out index 69f59829e3..b33c0f7a6c 100644 --- a/ql/src/test/results/clientpositive/vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/vectorization_9.q.out @@ -61,7 +61,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterStringColLikeStringScalar(col 7:string, pattern %b%), FilterExprOrExpr(children: FilterDoubleColGreaterEqualDoubleScalar(col 5:double, val -1.389), FilterStringGroupColLessStringScalar(col 6:string, val a))) - predicate: (((cdouble >= -1.389) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) + predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean) Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp) @@ -124,7 +124,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) + expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git 
a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out index f4931912f0..955c620a55 100644 --- a/ql/src/test/results/clientpositive/vectorization_limit.q.out +++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out @@ -213,7 +213,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct] Select Operator - expressions: ctinyint (type: tinyint), (cdouble + 1.0) (type: double) + expressions: ctinyint (type: tinyint), (cdouble + 1.0D) (type: double) outputColumnNames: _col0, _col1 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorization_numeric_overflows.q.out b/ql/src/test/results/clientpositive/vectorization_numeric_overflows.q.out index 344db2b897..583a55064a 100644 --- a/ql/src/test/results/clientpositive/vectorization_numeric_overflows.q.out +++ b/ql/src/test/results/clientpositive/vectorization_numeric_overflows.q.out @@ -439,10 +439,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColGreaterLongScalar(col 13:tinyint, val 0)(children: LongColSubtractLongScalarChecked(col 0:tinyint, val 2) -> 13:tinyint) - predicate: ((ctinyint1 - 2) > 0) (type: boolean) + predicate: ((ctinyint1 - 2Y) > 0) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: ctinyint1 (type: tinyint), (ctinyint1 - 2) (type: tinyint) + expressions: ctinyint1 (type: tinyint), (ctinyint1 - 2Y) (type: tinyint) outputColumnNames: _col0, _col1 Select Vectorization: className: VectorSelectOperator @@ -543,7 +543,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColLessLongScalar(col 13:tinyint, val 0)(children: LongColAddLongScalarChecked(col 1:tinyint, val 2) -> 13:tinyint) - predicate: ((ctinyint2 + 2) < 0) (type: boolean) + predicate: ((ctinyint2 + 2Y) < 0) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint2 (type: tinyint), (ctinyint2 + 2) (type: int) @@ -645,7 +645,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColLessLongScalar(col 13:smallint, val 0)(children: LongColMultiplyLongScalarChecked(col 3:smallint, val 2) -> 13:smallint) - predicate: ((csmallint2 * 2) < 0) (type: boolean) + predicate: ((csmallint2 * 2S) < 0) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: csmallint2 (type: smallint), (csmallint2 * 2) (type: int) @@ -730,7 +730,7 @@ STAGE PLANS: alias: test_overflow Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((csmallint2 * 2) < 0) (type: boolean) + predicate: ((csmallint2 * 2S) < 0) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: csmallint2 (type: smallint), (csmallint2 * 2) (type: int) @@ -1084,7 +1084,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterLongColGreaterLongScalar(col 13:smallint, val 0)(children: PosModLongToLong(col 2, divisor 16385) -> 13:smallint) - predicate: 
((csmallint1 pmod 16385) > 0) (type: boolean) + predicate: ((csmallint1 pmod 16385S) > 0) (type: boolean) Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint1 (type: tinyint), ctinyint2 (type: tinyint), csmallint1 (type: smallint), csmallint2 (type: smallint), cint1 (type: int), cint2 (type: int), cbigint1 (type: bigint), cbigint2 (type: bigint), cfloat1 (type: float), cfloat2 (type: float), cdouble1 (type: double), cdouble2 (type: double) diff --git a/ql/src/test/results/clientpositive/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/vectorization_part_project.q.out index a0e1d91415..e34bb24a1b 100644 --- a/ql/src/test/results/clientpositive/vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/vectorization_part_project.q.out @@ -66,7 +66,7 @@ STAGE PLANS: alias: alltypesorc_part Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: (cdouble + 2.0) (type: double) + expressions: (cdouble + 2.0D) (type: double) outputColumnNames: _col0 Statistics: Num rows: 200 Data size: 54496 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out index b89fa511df..428781f28a 100644 --- a/ql/src/test/results/clientpositive/vectorized_case.q.out +++ b/ql/src/test/results/clientpositive/vectorized_case.q.out @@ -57,10 +57,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1:smallint, val 418), FilterLongColEqualLongScalar(col 1:smallint, val 12205), FilterLongColEqualLongScalar(col 1:smallint, val 10583)) - predicate: ((csmallint = 10583) or (csmallint = 12205) or (csmallint = 418)) (type: boolean) + predicate: ((csmallint = 10583S) or (csmallint = 12205S) or (csmallint = 418S)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string) + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE ('c') END (type: string), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -202,10 +202,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprOrExpr(children: FilterLongColEqualLongScalar(col 1:smallint, val 418), FilterLongColEqualLongScalar(col 1:smallint, val 12205), FilterLongColEqualLongScalar(col 1:smallint, val 10583)) - predicate: ((csmallint = 10583) or (csmallint = 12205) or (csmallint = 418)) (type: boolean) + predicate: ((csmallint = 10583S) or (csmallint = 12205S) or (csmallint = 418S)) (type: boolean) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN 
(null) ELSE ('c') END (type: string) + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN ('b') ELSE (null) END (type: string), CASE WHEN ((csmallint = 418S)) THEN ('a') WHEN ((csmallint = 12205S)) THEN (null) ELSE ('c') END (type: string) outputColumnNames: _col0, _col1, _col2 Select Vectorization: className: VectorSelectOperator @@ -768,7 +768,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE ((attr + 2)) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN ((attr + 1L)) ELSE ((attr + 2L)) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -845,7 +845,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN (null) ELSE ((attr + 2)) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN (null) ELSE ((attr + 2L)) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator @@ -922,7 +922,7 @@ STAGE PLANS: native: true vectorizationSchemaColumns: [0:member:bigint, 1:attr:bigint, 2:ROW__ID:struct] Select Operator - expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (null) END (type: bigint) + expressions: CASE WHEN ((member = 1L)) THEN ((attr + 1L)) ELSE (null) END (type: bigint) outputColumnNames: _col0 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorized_casts.q.out b/ql/src/test/results/clientpositive/vectorized_casts.q.out index f6f210533f..9919cab4b9 100644 --- a/ql/src/test/results/clientpositive/vectorized_casts.q.out +++ b/ql/src/test/results/clientpositive/vectorized_casts.q.out @@ -174,7 +174,7 @@ STAGE PLANS: predicate: ((cbigint % 250) = 0) (type: boolean) Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToBoolean(ctinyint) (type: boolean), UDFToBoolean(csmallint) (type: boolean), UDFToBoolean(cint) (type: boolean), UDFToBoolean(cbigint) (type: boolean), UDFToBoolean(cfloat) (type: boolean), UDFToBoolean(cdouble) (type: boolean), cboolean1 (type: boolean), UDFToBoolean((cbigint * 0)) (type: boolean), UDFToBoolean(ctimestamp1) (type: boolean), UDFToBoolean(cstring1) (type: boolean), UDFToInteger(ctinyint) (type: int), UDFToInteger(csmallint) (type: int), cint (type: int), UDFToInteger(cbigint) (type: int), UDFToInteger(cfloat) (type: int), UDFToInteger(cdouble) (type: int), UDFToInteger(cboolean1) (type: int), UDFToInteger(ctimestamp1) (type: int), UDFToInteger(cstring1) (type: int), UDFToInteger(substr(cstring1, 1, 1)) (type: int), UDFToByte(cfloat) (type: tinyint), UDFToShort(cfloat) (type: smallint), UDFToLong(cfloat) (type: bigint), UDFToDouble(ctinyint) (type: double), UDFToDouble(csmallint) (type: double), UDFToDouble(cint) (type: double), UDFToDouble(cbigint) (type: double), UDFToDouble(cfloat) (type: double), cdouble (type: double), UDFToDouble(cboolean1) (type: double), UDFToDouble(ctimestamp1) (type: double), UDFToDouble(cstring1) (type: double), UDFToDouble(substr(cstring1, 1, 1)) (type: double), UDFToFloat(cint) (type: float), UDFToFloat(cdouble) (type: float), CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( 
cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), CAST( CAST( ctimestamp1 AS DATE) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp), UDFToString(ctinyint) (type: string), UDFToString(csmallint) (type: string), UDFToString(cint) (type: string), UDFToString(cbigint) (type: string), UDFToString(cfloat) (type: string), UDFToString(cdouble) (type: string), UDFToString(cboolean1) (type: string), UDFToString((cbigint * 0)) (type: string), UDFToString(ctimestamp1) (type: string), cstring1 (type: string), UDFToString(CAST( cstring1 AS CHAR(10)) (type: string), UDFToString(CAST( cstring1 AS varchar(10))) (type: string), UDFToFloat(UDFToInteger(cfloat)) (type: float), UDFToDouble((cint * 2)) (type: double), UDFToString(sin(cfloat)) (type: string), (UDFToDouble(UDFToFloat(cint)) + UDFToDouble(cboolean1)) (type: double) + expressions: UDFToBoolean(ctinyint) (type: boolean), UDFToBoolean(csmallint) (type: boolean), UDFToBoolean(cint) (type: boolean), UDFToBoolean(cbigint) (type: boolean), UDFToBoolean(cfloat) (type: boolean), UDFToBoolean(cdouble) (type: boolean), cboolean1 (type: boolean), UDFToBoolean((cbigint * 0L)) (type: boolean), UDFToBoolean(ctimestamp1) (type: boolean), UDFToBoolean(cstring1) (type: boolean), UDFToInteger(ctinyint) (type: int), UDFToInteger(csmallint) (type: int), cint (type: int), UDFToInteger(cbigint) (type: int), UDFToInteger(cfloat) (type: int), UDFToInteger(cdouble) (type: int), UDFToInteger(cboolean1) (type: int), UDFToInteger(ctimestamp1) (type: int), UDFToInteger(cstring1) (type: int), UDFToInteger(substr(cstring1, 1, 1)) (type: int), UDFToByte(cfloat) (type: tinyint), UDFToShort(cfloat) (type: smallint), UDFToLong(cfloat) (type: bigint), UDFToDouble(ctinyint) (type: double), UDFToDouble(csmallint) (type: double), UDFToDouble(cint) (type: double), UDFToDouble(cbigint) (type: double), UDFToDouble(cfloat) (type: double), cdouble (type: double), UDFToDouble(cboolean1) (type: double), UDFToDouble(ctimestamp1) (type: double), UDFToDouble(cstring1) (type: double), UDFToDouble(substr(cstring1, 1, 1)) (type: double), UDFToFloat(cint) (type: float), UDFToFloat(cdouble) (type: float), CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), CAST( CAST( ctimestamp1 AS DATE) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp), UDFToString(ctinyint) (type: string), UDFToString(csmallint) (type: string), UDFToString(cint) (type: string), UDFToString(cbigint) (type: string), UDFToString(cfloat) (type: string), UDFToString(cdouble) (type: string), UDFToString(cboolean1) (type: string), UDFToString((cbigint * 0L)) (type: string), UDFToString(ctimestamp1) (type: string), cstring1 (type: string), UDFToString(CAST( cstring1 AS CHAR(10)) (type: string), UDFToString(CAST( cstring1 AS varchar(10))) (type: string), UDFToFloat(UDFToInteger(cfloat)) 
(type: float), UDFToDouble((cint * 2)) (type: double), UDFToString(sin(cfloat)) (type: string), (UDFToDouble(UDFToFloat(cint)) + UDFToDouble(cboolean1)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, _col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, _col55, _col56, _col57, _col58, _col59, _col60, _col61, _col62 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out index 298d92bdcd..50c3448864 100644 --- a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out @@ -262,7 +262,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: fl_time (type: timestamp), to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), dayofweek(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: date), date_add(fl_time, 2) (type: date), date_sub(fl_time, 2) (type: date), datediff(fl_time, '2000-01-01') (type: int), datediff(fl_time, 2000-01-01) (type: int), datediff(fl_time, 2000-01-01 00:00:00.0) (type: int), datediff(fl_time, 2000-01-01 11:13:09.0) (type: int), datediff(fl_time, '2007-03-14') (type: int), datediff(fl_time, 2007-03-14) (type: int), datediff(fl_time, 2007-03-14 00:00:00.0) (type: int), datediff(fl_time, 2007-03-14 08:21:59.0) (type: int) + expressions: fl_time (type: timestamp), to_unix_timestamp(fl_time) (type: bigint), year(fl_time) (type: int), month(fl_time) (type: int), day(fl_time) (type: int), dayofmonth(fl_time) (type: int), dayofweek(fl_time) (type: int), weekofyear(fl_time) (type: int), CAST( fl_time AS DATE) (type: date), to_date(fl_time) (type: date), date_add(fl_time, 2) (type: date), date_sub(fl_time, 2) (type: date), datediff(fl_time, '2000-01-01') (type: int), datediff(fl_time, DATE'2000-01-01') (type: int), datediff(fl_time, TIMESTAMP'2000-01-01 00:00:00.0') (type: int), datediff(fl_time, TIMESTAMP'2000-01-01 11:13:09.0') (type: int), datediff(fl_time, '2007-03-14') (type: int), datediff(fl_time, DATE'2007-03-14') (type: int), datediff(fl_time, TIMESTAMP'2007-03-14 00:00:00.0') (type: int), datediff(fl_time, TIMESTAMP'2007-03-14 08:21:59.0') (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 Select Vectorization: className: VectorSelectOperator @@ -550,7 +550,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: fl_date (type: date), to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), dayofweek(fl_date) (type: int), weekofyear(fl_date) (type: int), fl_date (type: date), to_date(fl_date) (type: date), date_add(fl_date, 2) (type: date), date_sub(fl_date, 2) (type: date), datediff(fl_date, '2000-01-01') (type: int), datediff(fl_date, 2000-01-01) (type: 
int), datediff(fl_date, 2000-01-01 00:00:00.0) (type: int), datediff(fl_date, 2000-01-01 11:13:09.0) (type: int), datediff(fl_date, '2007-03-14') (type: int), datediff(fl_date, 2007-03-14) (type: int), datediff(fl_date, 2007-03-14 00:00:00.0) (type: int), datediff(fl_date, 2007-03-14 08:21:59.0) (type: int) + expressions: fl_date (type: date), to_unix_timestamp(fl_date) (type: bigint), year(fl_date) (type: int), month(fl_date) (type: int), day(fl_date) (type: int), dayofmonth(fl_date) (type: int), dayofweek(fl_date) (type: int), weekofyear(fl_date) (type: int), fl_date (type: date), to_date(fl_date) (type: date), date_add(fl_date, 2) (type: date), date_sub(fl_date, 2) (type: date), datediff(fl_date, '2000-01-01') (type: int), datediff(fl_date, DATE'2000-01-01') (type: int), datediff(fl_date, TIMESTAMP'2000-01-01 00:00:00.0') (type: int), datediff(fl_date, TIMESTAMP'2000-01-01 11:13:09.0') (type: int), datediff(fl_date, '2007-03-14') (type: int), datediff(fl_date, DATE'2007-03-14') (type: int), datediff(fl_date, TIMESTAMP'2007-03-14 00:00:00.0') (type: int), datediff(fl_date, TIMESTAMP'2007-03-14 08:21:59.0') (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 Select Vectorization: className: VectorSelectOperator @@ -842,7 +842,7 @@ STAGE PLANS: TableScan Vectorization: native: true Select Operator - expressions: fl_time (type: timestamp), fl_date (type: date), (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (dayofweek(fl_time) = dayofweek(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = fl_date) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), (date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean), (datediff(fl_time, 2000-01-01) = datediff(fl_date, 2000-01-01)) (type: boolean), (datediff(fl_time, 2000-01-01 00:00:00.0) = datediff(fl_date, 2000-01-01 00:00:00.0)) (type: boolean), (datediff(fl_time, 2000-01-01 11:13:09.0) = datediff(fl_date, 2000-01-01 11:13:09.0)) (type: boolean), (datediff(fl_time, '2007-03-14') = datediff(fl_date, '2007-03-14')) (type: boolean), (datediff(fl_time, 2007-03-14) = datediff(fl_date, 2007-03-14)) (type: boolean), (datediff(fl_time, 2007-03-14 00:00:00.0) = datediff(fl_date, 2007-03-14 00:00:00.0)) (type: boolean), (datediff(fl_time, 2007-03-14 08:21:59.0) = datediff(fl_date, 2007-03-14 08:21:59.0)) (type: boolean), (datediff(fl_date, '2000-01-01') = datediff(fl_date, 2000-01-01)) (type: boolean), (datediff(fl_date, '2007-03-14') = datediff(fl_date, 2007-03-14)) (type: boolean) + expressions: fl_time (type: timestamp), fl_date (type: date), (year(fl_time) = year(fl_date)) (type: boolean), (month(fl_time) = month(fl_date)) (type: boolean), (day(fl_time) = day(fl_date)) (type: boolean), (dayofmonth(fl_time) = dayofmonth(fl_date)) (type: boolean), (dayofweek(fl_time) = dayofweek(fl_date)) (type: boolean), (weekofyear(fl_time) = weekofyear(fl_date)) (type: boolean), (CAST( fl_time AS DATE) = fl_date) (type: boolean), (to_date(fl_time) = to_date(fl_date)) (type: boolean), (date_add(fl_time, 2) = date_add(fl_date, 2)) (type: boolean), 
(date_sub(fl_time, 2) = date_sub(fl_date, 2)) (type: boolean), (datediff(fl_time, '2000-01-01') = datediff(fl_date, '2000-01-01')) (type: boolean), (datediff(fl_time, DATE'2000-01-01') = datediff(fl_date, DATE'2000-01-01')) (type: boolean), (datediff(fl_time, TIMESTAMP'2000-01-01 00:00:00.0') = datediff(fl_date, TIMESTAMP'2000-01-01 00:00:00.0')) (type: boolean), (datediff(fl_time, TIMESTAMP'2000-01-01 11:13:09.0') = datediff(fl_date, TIMESTAMP'2000-01-01 11:13:09.0')) (type: boolean), (datediff(fl_time, '2007-03-14') = datediff(fl_date, '2007-03-14')) (type: boolean), (datediff(fl_time, DATE'2007-03-14') = datediff(fl_date, DATE'2007-03-14')) (type: boolean), (datediff(fl_time, TIMESTAMP'2007-03-14 00:00:00.0') = datediff(fl_date, TIMESTAMP'2007-03-14 00:00:00.0')) (type: boolean), (datediff(fl_time, TIMESTAMP'2007-03-14 08:21:59.0') = datediff(fl_date, TIMESTAMP'2007-03-14 08:21:59.0')) (type: boolean), (datediff(fl_date, '2000-01-01') = datediff(fl_date, DATE'2000-01-01')) (type: boolean), (datediff(fl_date, '2007-03-14') = datediff(fl_date, DATE'2007-03-14')) (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out index d5ba561910..52ddd19e2f 100644 --- a/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out @@ -124,10 +124,10 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 13:bigint, val 0)(children: LongColModuloLongScalar(col 3:bigint, val 500) -> 13:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 14:double, val -1.0)(children: FuncSinDoubleToDouble(col 4:float) -> 14:double)) - predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean) + predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0D)) (type: boolean) Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) 
(type: double) + expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0D)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2, cdouble) (type: double), power(log2(cdouble), 2) (type: double), power(log2(cdouble), 2) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159D)) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out index 6c49e038a3..5f43915ec8 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out @@ -231,7 +231,7 @@ STAGE PLANS: className: VectorFilterOperator native: true predicateExpression: FilterTimestampColumnInList(col 0:timestamp, values [0001-01-01 00:00:00.0, 0002-02-02 00:00:00.0]) - predicate: (ts) IN (0001-01-01 00:00:00.0, 0002-02-02 00:00:00.0) (type: boolean) + predicate: (ts) IN (TIMESTAMP'0001-01-01 00:00:00.0', TIMESTAMP'0002-02-02 00:00:00.0') (type: boolean) Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ts (type: timestamp) diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out index d864b57fd9..2ccff7b919 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out @@ -993,7 +993,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 672 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: round(_col0, 0) (type: double), _col1 BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 (type: boolean), _col2 BETWEEN 8.97077295279421E19 AND 8.97077295279422E19 (type: boolean), _col3 BETWEEN 9.20684592523616E19 AND 9.20684592523617E19 (type: boolean), round(_col4, 3) (type: double), round(_col5, 3) (type: double), round(_col6, 3) (type: double), round(_col7, 3) (type: double) + expressions: round(_col0, 0) (type: double), _col1 BETWEEN 8.97077295279421E19D AND 
8.97077295279422E19D (type: boolean), _col2 BETWEEN 8.97077295279421E19D AND 8.97077295279422E19D (type: boolean), _col3 BETWEEN 9.20684592523616E19D AND 9.20684592523617E19D (type: boolean), round(_col4, 3) (type: double), round(_col5, 3) (type: double), round(_col6, 3) (type: double), round(_col7, 3) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 1 Data size: 672 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out index a203507512..fd9fd3858c 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out @@ -59,7 +59,7 @@ STAGE PLANS: predicate: ((cbigint % 250) = 0) (type: boolean) Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) + expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Select Vectorization: className: VectorSelectOperator @@ -219,7 +219,7 @@ STAGE PLANS: predicate: ((cbigint % 250) = 0) (type: boolean) Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) + expressions: CAST( ctinyint AS TIMESTAMP) (type: timestamp), CAST( csmallint AS TIMESTAMP) (type: timestamp), CAST( cint AS TIMESTAMP) (type: timestamp), CAST( cbigint AS TIMESTAMP) (type: timestamp), CAST( cfloat AS TIMESTAMP) (type: timestamp), CAST( cdouble AS TIMESTAMP) (type: timestamp), CAST( cboolean1 AS TIMESTAMP) (type: timestamp), CAST( (cbigint * 0L) AS TIMESTAMP) (type: timestamp), ctimestamp1 (type: timestamp), CAST( cstring1 AS TIMESTAMP) (type: timestamp), CAST( substr(cstring1, 1, 1) AS TIMESTAMP) (type: timestamp) outputColumnNames: _col0, _col1, 
_col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 Select Vectorization: className: VectorSelectOperator diff --git a/ql/src/test/results/clientpositive/view_cbo.q.out b/ql/src/test/results/clientpositive/view_cbo.q.out index 2fafa4fe22..c740596e70 100644 --- a/ql/src/test/results/clientpositive/view_cbo.q.out +++ b/ql/src/test/results/clientpositive/view_cbo.q.out @@ -21,12 +21,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0) (type: double) + expressions: value (type: string), key (type: string), (UDFToDouble(key) + 1.0D) (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(_col2) - keys: _col0 (type: string), _col1 (type: string), 0 (type: bigint) + keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1500 Data size: 15936 Basic stats: COMPLETE Column stats: NONE @@ -690,7 +690,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint) + expressions: _col0 (type: string), _col1 (type: string), 2L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -749,7 +749,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col2 * 2) = _col3) and (_col2 > 0)) (type: boolean) + predicate: (((_col2 * 2) = _col3) and (_col2 > 0L)) (type: boolean) Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -795,7 +795,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint) + expressions: _col0 (type: string), _col1 (type: string), 1L (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 921cba12b3..1510d4f7f2 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1091; - ::apache::thrift::protocol::TType _etype1094; - xfer += iprot->readListBegin(_etype1094, _size1091); - this->success.resize(_size1091); - uint32_t _i1095; - for (_i1095 = 0; _i1095 < _size1091; ++_i1095) + uint32_t _size1111; + ::apache::thrift::protocol::TType _etype1114; + xfer += iprot->readListBegin(_etype1114, _size1111); + 
this->success.resize(_size1111); + uint32_t _i1115; + for (_i1115 = 0; _i1115 < _size1111; ++_i1115) { - xfer += iprot->readString(this->success[_i1095]); + xfer += iprot->readString(this->success[_i1115]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1096; - for (_iter1096 = this->success.begin(); _iter1096 != this->success.end(); ++_iter1096) + std::vector ::const_iterator _iter1116; + for (_iter1116 = this->success.begin(); _iter1116 != this->success.end(); ++_iter1116) { - xfer += oprot->writeString((*_iter1096)); + xfer += oprot->writeString((*_iter1116)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1097; - ::apache::thrift::protocol::TType _etype1100; - xfer += iprot->readListBegin(_etype1100, _size1097); - (*(this->success)).resize(_size1097); - uint32_t _i1101; - for (_i1101 = 0; _i1101 < _size1097; ++_i1101) + uint32_t _size1117; + ::apache::thrift::protocol::TType _etype1120; + xfer += iprot->readListBegin(_etype1120, _size1117); + (*(this->success)).resize(_size1117); + uint32_t _i1121; + for (_i1121 = 0; _i1121 < _size1117; ++_i1121) { - xfer += iprot->readString((*(this->success))[_i1101]); + xfer += iprot->readString((*(this->success))[_i1121]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1102; - ::apache::thrift::protocol::TType _etype1105; - xfer += iprot->readListBegin(_etype1105, _size1102); - this->success.resize(_size1102); - uint32_t _i1106; - for (_i1106 = 0; _i1106 < _size1102; ++_i1106) + uint32_t _size1122; + ::apache::thrift::protocol::TType _etype1125; + xfer += iprot->readListBegin(_etype1125, _size1122); + this->success.resize(_size1122); + uint32_t _i1126; + for (_i1126 = 0; _i1126 < _size1122; ++_i1126) { - xfer += iprot->readString(this->success[_i1106]); + xfer += iprot->readString(this->success[_i1126]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1107; - for (_iter1107 = this->success.begin(); _iter1107 != this->success.end(); ++_iter1107) + std::vector ::const_iterator _iter1127; + for (_iter1127 = this->success.begin(); _iter1127 != this->success.end(); ++_iter1127) { - xfer += oprot->writeString((*_iter1107)); + xfer += oprot->writeString((*_iter1127)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1108; - ::apache::thrift::protocol::TType _etype1111; - xfer += iprot->readListBegin(_etype1111, _size1108); - (*(this->success)).resize(_size1108); - uint32_t _i1112; - 
for (_i1112 = 0; _i1112 < _size1108; ++_i1112) + uint32_t _size1128; + ::apache::thrift::protocol::TType _etype1131; + xfer += iprot->readListBegin(_etype1131, _size1128); + (*(this->success)).resize(_size1128); + uint32_t _i1132; + for (_i1132 = 0; _i1132 < _size1128; ++_i1132) { - xfer += iprot->readString((*(this->success))[_i1112]); + xfer += iprot->readString((*(this->success))[_i1132]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1113; - ::apache::thrift::protocol::TType _ktype1114; - ::apache::thrift::protocol::TType _vtype1115; - xfer += iprot->readMapBegin(_ktype1114, _vtype1115, _size1113); - uint32_t _i1117; - for (_i1117 = 0; _i1117 < _size1113; ++_i1117) + uint32_t _size1133; + ::apache::thrift::protocol::TType _ktype1134; + ::apache::thrift::protocol::TType _vtype1135; + xfer += iprot->readMapBegin(_ktype1134, _vtype1135, _size1133); + uint32_t _i1137; + for (_i1137 = 0; _i1137 < _size1133; ++_i1137) { - std::string _key1118; - xfer += iprot->readString(_key1118); - Type& _val1119 = this->success[_key1118]; - xfer += _val1119.read(iprot); + std::string _key1138; + xfer += iprot->readString(_key1138); + Type& _val1139 = this->success[_key1138]; + xfer += _val1139.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1120; - for (_iter1120 = this->success.begin(); _iter1120 != this->success.end(); ++_iter1120) + std::map ::const_iterator _iter1140; + for (_iter1140 = this->success.begin(); _iter1140 != this->success.end(); ++_iter1140) { - xfer += oprot->writeString(_iter1120->first); - xfer += _iter1120->second.write(oprot); + xfer += oprot->writeString(_iter1140->first); + xfer += _iter1140->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1121; - ::apache::thrift::protocol::TType _ktype1122; - ::apache::thrift::protocol::TType _vtype1123; - xfer += iprot->readMapBegin(_ktype1122, _vtype1123, _size1121); - uint32_t _i1125; - for (_i1125 = 0; _i1125 < _size1121; ++_i1125) + uint32_t _size1141; + ::apache::thrift::protocol::TType _ktype1142; + ::apache::thrift::protocol::TType _vtype1143; + xfer += iprot->readMapBegin(_ktype1142, _vtype1143, _size1141); + uint32_t _i1145; + for (_i1145 = 0; _i1145 < _size1141; ++_i1145) { - std::string _key1126; - xfer += iprot->readString(_key1126); - Type& _val1127 = (*(this->success))[_key1126]; - xfer += _val1127.read(iprot); + std::string _key1146; + xfer += iprot->readString(_key1146); + Type& _val1147 = (*(this->success))[_key1146]; + xfer += _val1147.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1128; - ::apache::thrift::protocol::TType _etype1131; - xfer += iprot->readListBegin(_etype1131, _size1128); - 
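Every renumbered hunk in this generated file is the same stereotyped list-deserialization pattern; only the generator's monotonically increasing temporary counter moved (_size1091 to _size1111, and so on), because the newly generated default-constraint types and methods earlier in the output consume temporaries and shift every later one. The logic itself is unchanged. A minimal sketch of the pattern, assuming only the Apache Thrift C++ runtime (readStringList is an illustrative name, not from the generated file):

```cpp
#include <cstdint>
#include <string>
#include <vector>
#include <thrift/protocol/TProtocol.h>

using apache::thrift::protocol::TProtocol;
using apache::thrift::protocol::TType;

// The shape of every "success.clear(); readListBegin; resize; loop;
// readListEnd" block above, with the generated _sizeNNNN/_etypeNNNN/_iNNNN
// temporaries given stable names.
uint32_t readStringList(TProtocol* iprot, std::vector<std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size;   // _sizeNNNN
  TType etype;     // _etypeNNNN
  out.clear();
  xfer += iprot->readListBegin(etype, size);
  out.resize(size);
  for (uint32_t i = 0; i < size; ++i) {  // _iNNNN
    xfer += iprot->readString(out[i]);
  }
  xfer += iprot->readListEnd();
  return xfer;
}
```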
this->success.resize(_size1128); - uint32_t _i1132; - for (_i1132 = 0; _i1132 < _size1128; ++_i1132) + uint32_t _size1148; + ::apache::thrift::protocol::TType _etype1151; + xfer += iprot->readListBegin(_etype1151, _size1148); + this->success.resize(_size1148); + uint32_t _i1152; + for (_i1152 = 0; _i1152 < _size1148; ++_i1152) { - xfer += this->success[_i1132].read(iprot); + xfer += this->success[_i1152].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1133; - for (_iter1133 = this->success.begin(); _iter1133 != this->success.end(); ++_iter1133) + std::vector ::const_iterator _iter1153; + for (_iter1153 = this->success.begin(); _iter1153 != this->success.end(); ++_iter1153) { - xfer += (*_iter1133).write(oprot); + xfer += (*_iter1153).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1134; - ::apache::thrift::protocol::TType _etype1137; - xfer += iprot->readListBegin(_etype1137, _size1134); - (*(this->success)).resize(_size1134); - uint32_t _i1138; - for (_i1138 = 0; _i1138 < _size1134; ++_i1138) + uint32_t _size1154; + ::apache::thrift::protocol::TType _etype1157; + xfer += iprot->readListBegin(_etype1157, _size1154); + (*(this->success)).resize(_size1154); + uint32_t _i1158; + for (_i1158 = 0; _i1158 < _size1154; ++_i1158) { - xfer += (*(this->success))[_i1138].read(iprot); + xfer += (*(this->success))[_i1158].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1139; - ::apache::thrift::protocol::TType _etype1142; - xfer += iprot->readListBegin(_etype1142, _size1139); - this->success.resize(_size1139); - uint32_t _i1143; - for (_i1143 = 0; _i1143 < _size1139; ++_i1143) + uint32_t _size1159; + ::apache::thrift::protocol::TType _etype1162; + xfer += iprot->readListBegin(_etype1162, _size1159); + this->success.resize(_size1159); + uint32_t _i1163; + for (_i1163 = 0; _i1163 < _size1159; ++_i1163) { - xfer += this->success[_i1143].read(iprot); + xfer += this->success[_i1163].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1144; - for (_iter1144 = this->success.begin(); _iter1144 != this->success.end(); ++_iter1144) + std::vector ::const_iterator _iter1164; + for (_iter1164 = this->success.begin(); _iter1164 != this->success.end(); ++_iter1164) { - xfer += (*_iter1144).write(oprot); + xfer += (*_iter1164).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1145; - 
::apache::thrift::protocol::TType _etype1148; - xfer += iprot->readListBegin(_etype1148, _size1145); - (*(this->success)).resize(_size1145); - uint32_t _i1149; - for (_i1149 = 0; _i1149 < _size1145; ++_i1149) + uint32_t _size1165; + ::apache::thrift::protocol::TType _etype1168; + xfer += iprot->readListBegin(_etype1168, _size1165); + (*(this->success)).resize(_size1165); + uint32_t _i1169; + for (_i1169 = 0; _i1169 < _size1165; ++_i1169) { - xfer += (*(this->success))[_i1149].read(iprot); + xfer += (*(this->success))[_i1169].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1150; - ::apache::thrift::protocol::TType _etype1153; - xfer += iprot->readListBegin(_etype1153, _size1150); - this->success.resize(_size1150); - uint32_t _i1154; - for (_i1154 = 0; _i1154 < _size1150; ++_i1154) + uint32_t _size1170; + ::apache::thrift::protocol::TType _etype1173; + xfer += iprot->readListBegin(_etype1173, _size1170); + this->success.resize(_size1170); + uint32_t _i1174; + for (_i1174 = 0; _i1174 < _size1170; ++_i1174) { - xfer += this->success[_i1154].read(iprot); + xfer += this->success[_i1174].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1155; - for (_iter1155 = this->success.begin(); _iter1155 != this->success.end(); ++_iter1155) + std::vector ::const_iterator _iter1175; + for (_iter1175 = this->success.begin(); _iter1175 != this->success.end(); ++_iter1175) { - xfer += (*_iter1155).write(oprot); + xfer += (*_iter1175).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1156; - ::apache::thrift::protocol::TType _etype1159; - xfer += iprot->readListBegin(_etype1159, _size1156); - (*(this->success)).resize(_size1156); - uint32_t _i1160; - for (_i1160 = 0; _i1160 < _size1156; ++_i1160) + uint32_t _size1176; + ::apache::thrift::protocol::TType _etype1179; + xfer += iprot->readListBegin(_etype1179, _size1176); + (*(this->success)).resize(_size1176); + uint32_t _i1180; + for (_i1180 = 0; _i1180 < _size1176; ++_i1180) { - xfer += (*(this->success))[_i1160].read(iprot); + xfer += (*(this->success))[_i1180].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1161; - ::apache::thrift::protocol::TType _etype1164; - xfer += iprot->readListBegin(_etype1164, _size1161); - this->success.resize(_size1161); - uint32_t _i1165; - for (_i1165 = 0; _i1165 < _size1161; ++_i1165) + uint32_t _size1181; + ::apache::thrift::protocol::TType _etype1184; + xfer += iprot->readListBegin(_etype1184, _size1181); + this->success.resize(_size1181); + uint32_t _i1185; + for (_i1185 = 0; _i1185 < _size1181; ++_i1185) { - xfer += this->success[_i1165].read(iprot); + xfer += this->success[_i1185].read(iprot); } xfer += 
iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1166; - for (_iter1166 = this->success.begin(); _iter1166 != this->success.end(); ++_iter1166) + std::vector ::const_iterator _iter1186; + for (_iter1186 = this->success.begin(); _iter1186 != this->success.end(); ++_iter1186) { - xfer += (*_iter1166).write(oprot); + xfer += (*_iter1186).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1167; - ::apache::thrift::protocol::TType _etype1170; - xfer += iprot->readListBegin(_etype1170, _size1167); - (*(this->success)).resize(_size1167); - uint32_t _i1171; - for (_i1171 = 0; _i1171 < _size1167; ++_i1171) + uint32_t _size1187; + ::apache::thrift::protocol::TType _etype1190; + xfer += iprot->readListBegin(_etype1190, _size1187); + (*(this->success)).resize(_size1187); + uint32_t _i1191; + for (_i1191 = 0; _i1191 < _size1187; ++_i1191) { - xfer += (*(this->success))[_i1171].read(iprot); + xfer += (*(this->success))[_i1191].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1172; - ::apache::thrift::protocol::TType _etype1175; - xfer += iprot->readListBegin(_etype1175, _size1172); - this->primaryKeys.resize(_size1172); - uint32_t _i1176; - for (_i1176 = 0; _i1176 < _size1172; ++_i1176) + uint32_t _size1192; + ::apache::thrift::protocol::TType _etype1195; + xfer += iprot->readListBegin(_etype1195, _size1192); + this->primaryKeys.resize(_size1192); + uint32_t _i1196; + for (_i1196 = 0; _i1196 < _size1192; ++_i1196) { - xfer += this->primaryKeys[_i1176].read(iprot); + xfer += this->primaryKeys[_i1196].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1177; - ::apache::thrift::protocol::TType _etype1180; - xfer += iprot->readListBegin(_etype1180, _size1177); - this->foreignKeys.resize(_size1177); - uint32_t _i1181; - for (_i1181 = 0; _i1181 < _size1177; ++_i1181) + uint32_t _size1197; + ::apache::thrift::protocol::TType _etype1200; + xfer += iprot->readListBegin(_etype1200, _size1197); + this->foreignKeys.resize(_size1197); + uint32_t _i1201; + for (_i1201 = 0; _i1201 < _size1197; ++_i1201) { - xfer += this->foreignKeys[_i1181].read(iprot); + xfer += this->foreignKeys[_i1201].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1182; - ::apache::thrift::protocol::TType _etype1185; - xfer += iprot->readListBegin(_etype1185, _size1182); - this->uniqueConstraints.resize(_size1182); - uint32_t _i1186; - for (_i1186 = 0; _i1186 < _size1182; ++_i1186) + uint32_t _size1202; + ::apache::thrift::protocol::TType _etype1205; + xfer += 
iprot->readListBegin(_etype1205, _size1202); + this->uniqueConstraints.resize(_size1202); + uint32_t _i1206; + for (_i1206 = 0; _i1206 < _size1202; ++_i1206) { - xfer += this->uniqueConstraints[_i1186].read(iprot); + xfer += this->uniqueConstraints[_i1206].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1187; - ::apache::thrift::protocol::TType _etype1190; - xfer += iprot->readListBegin(_etype1190, _size1187); - this->notNullConstraints.resize(_size1187); - uint32_t _i1191; - for (_i1191 = 0; _i1191 < _size1187; ++_i1191) + uint32_t _size1207; + ::apache::thrift::protocol::TType _etype1210; + xfer += iprot->readListBegin(_etype1210, _size1207); + this->notNullConstraints.resize(_size1207); + uint32_t _i1211; + for (_i1211 = 0; _i1211 < _size1207; ++_i1211) { - xfer += this->notNullConstraints[_i1191].read(iprot); + xfer += this->notNullConstraints[_i1211].read(iprot); } xfer += iprot->readListEnd(); } @@ -4594,6 +4594,26 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->defaultConstraints.clear(); + uint32_t _size1212; + ::apache::thrift::protocol::TType _etype1215; + xfer += iprot->readListBegin(_etype1215, _size1212); + this->defaultConstraints.resize(_size1212); + uint32_t _i1216; + for (_i1216 = 0; _i1216 < _size1212; ++_i1216) + { + xfer += this->defaultConstraints[_i1216].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.defaultConstraints = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -4618,10 +4638,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1192; - for (_iter1192 = this->primaryKeys.begin(); _iter1192 != this->primaryKeys.end(); ++_iter1192) + std::vector ::const_iterator _iter1217; + for (_iter1217 = this->primaryKeys.begin(); _iter1217 != this->primaryKeys.end(); ++_iter1217) { - xfer += (*_iter1192).write(oprot); + xfer += (*_iter1217).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4630,10 +4650,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1193; - for (_iter1193 = this->foreignKeys.begin(); _iter1193 != this->foreignKeys.end(); ++_iter1193) + std::vector ::const_iterator _iter1218; + for (_iter1218 = this->foreignKeys.begin(); _iter1218 != this->foreignKeys.end(); ++_iter1218) { - xfer += (*_iter1193).write(oprot); + xfer += (*_iter1218).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4642,10 +4662,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector 
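The genuinely new logic in create_table_with_constraints_args::read is the case 6 branch above: it deserializes the added defaultConstraints list and sets __isset.defaultConstraints, while every other change in the hunk is counter renumbering. Note the convention in the surrounding switch: an unknown field id, or a known id carrying an unexpected wire type, falls through to iprot->skip(ftype), which is how older readers tolerate payloads from newer clients. A self-contained sketch of that convention (the function name and the single string field are illustrative, not from the patch):

```cpp
#include <cstdint>
#include <string>
#include <thrift/protocol/TProtocol.h>

using namespace apache::thrift::protocol;

// Reads one struct, keeping only field id 1 (a string) and skipping
// everything else -- the same skip-on-mismatch pattern the generated
// read() methods use for forward compatibility.
uint32_t readTolerantly(TProtocol* iprot, std::string& name) {
  uint32_t xfer = 0;
  std::string fname;
  TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  while (true) {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == T_STOP) {
      break;
    }
    if (fid == 1 && ftype == T_STRING) {
      xfer += iprot->readString(name);
    } else {
      xfer += iprot->skip(ftype);  // unknown or mistyped field: ignore it
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
```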
::const_iterator _iter1194; - for (_iter1194 = this->uniqueConstraints.begin(); _iter1194 != this->uniqueConstraints.end(); ++_iter1194) + std::vector ::const_iterator _iter1219; + for (_iter1219 = this->uniqueConstraints.begin(); _iter1219 != this->uniqueConstraints.end(); ++_iter1219) { - xfer += (*_iter1194).write(oprot); + xfer += (*_iter1219).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4654,10 +4674,22 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1195; - for (_iter1195 = this->notNullConstraints.begin(); _iter1195 != this->notNullConstraints.end(); ++_iter1195) + std::vector ::const_iterator _iter1220; + for (_iter1220 = this->notNullConstraints.begin(); _iter1220 != this->notNullConstraints.end(); ++_iter1220) + { + xfer += (*_iter1220).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); + std::vector ::const_iterator _iter1221; + for (_iter1221 = this->defaultConstraints.begin(); _iter1221 != this->defaultConstraints.end(); ++_iter1221) { - xfer += (*_iter1195).write(oprot); + xfer += (*_iter1221).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4685,10 +4717,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1196; - for (_iter1196 = (*(this->primaryKeys)).begin(); _iter1196 != (*(this->primaryKeys)).end(); ++_iter1196) + std::vector ::const_iterator _iter1222; + for (_iter1222 = (*(this->primaryKeys)).begin(); _iter1222 != (*(this->primaryKeys)).end(); ++_iter1222) { - xfer += (*_iter1196).write(oprot); + xfer += (*_iter1222).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4697,10 +4729,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1197; - for (_iter1197 = (*(this->foreignKeys)).begin(); _iter1197 != (*(this->foreignKeys)).end(); ++_iter1197) + std::vector ::const_iterator _iter1223; + for (_iter1223 = (*(this->foreignKeys)).begin(); _iter1223 != (*(this->foreignKeys)).end(); ++_iter1223) { - xfer += (*_iter1197).write(oprot); + xfer += (*_iter1223).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4709,10 +4741,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1198; - for (_iter1198 = (*(this->uniqueConstraints)).begin(); _iter1198 != (*(this->uniqueConstraints)).end(); ++_iter1198) + std::vector 
::const_iterator _iter1224; + for (_iter1224 = (*(this->uniqueConstraints)).begin(); _iter1224 != (*(this->uniqueConstraints)).end(); ++_iter1224) { - xfer += (*_iter1198).write(oprot); + xfer += (*_iter1224).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4721,10 +4753,22 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1199; - for (_iter1199 = (*(this->notNullConstraints)).begin(); _iter1199 != (*(this->notNullConstraints)).end(); ++_iter1199) + std::vector ::const_iterator _iter1225; + for (_iter1225 = (*(this->notNullConstraints)).begin(); _iter1225 != (*(this->notNullConstraints)).end(); ++_iter1225) + { + xfer += (*_iter1225).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->defaultConstraints)).size())); + std::vector ::const_iterator _iter1226; + for (_iter1226 = (*(this->defaultConstraints)).begin(); _iter1226 != (*(this->defaultConstraints)).end(); ++_iter1226) { - xfer += (*_iter1199).write(oprot); + xfer += (*_iter1226).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5939,6 +5983,213 @@ uint32_t ThriftHiveMetastore_add_not_null_constraint_presult::read(::apache::thr } +ThriftHiveMetastore_add_default_constraint_args::~ThriftHiveMetastore_add_default_constraint_args() throw() { +} + + +uint32_t ThriftHiveMetastore_add_default_constraint_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_add_default_constraint_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_default_constraint_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_add_default_constraint_pargs::~ThriftHiveMetastore_add_default_constraint_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_add_default_constraint_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_add_default_constraint_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_add_default_constraint_result::~ThriftHiveMetastore_add_default_constraint_result() throw() { +} + + +uint32_t ThriftHiveMetastore_add_default_constraint_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_add_default_constraint_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_default_constraint_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_add_default_constraint_presult::~ThriftHiveMetastore_add_default_constraint_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_add_default_constraint_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + ThriftHiveMetastore_drop_table_args::~ThriftHiveMetastore_drop_table_args() throw() { } @@ -6478,14 +6729,14 @@ uint32_t 
ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1200; - ::apache::thrift::protocol::TType _etype1203; - xfer += iprot->readListBegin(_etype1203, _size1200); - this->partNames.resize(_size1200); - uint32_t _i1204; - for (_i1204 = 0; _i1204 < _size1200; ++_i1204) + uint32_t _size1227; + ::apache::thrift::protocol::TType _etype1230; + xfer += iprot->readListBegin(_etype1230, _size1227); + this->partNames.resize(_size1227); + uint32_t _i1231; + for (_i1231 = 0; _i1231 < _size1227; ++_i1231) { - xfer += iprot->readString(this->partNames[_i1204]); + xfer += iprot->readString(this->partNames[_i1231]); } xfer += iprot->readListEnd(); } @@ -6522,10 +6773,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1205; - for (_iter1205 = this->partNames.begin(); _iter1205 != this->partNames.end(); ++_iter1205) + std::vector ::const_iterator _iter1232; + for (_iter1232 = this->partNames.begin(); _iter1232 != this->partNames.end(); ++_iter1232) { - xfer += oprot->writeString((*_iter1205)); + xfer += oprot->writeString((*_iter1232)); } xfer += oprot->writeListEnd(); } @@ -6557,10 +6808,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1206; - for (_iter1206 = (*(this->partNames)).begin(); _iter1206 != (*(this->partNames)).end(); ++_iter1206) + std::vector ::const_iterator _iter1233; + for (_iter1233 = (*(this->partNames)).begin(); _iter1233 != (*(this->partNames)).end(); ++_iter1233) { - xfer += oprot->writeString((*_iter1206)); + xfer += oprot->writeString((*_iter1233)); } xfer += oprot->writeListEnd(); } @@ -6804,14 +7055,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1207; - ::apache::thrift::protocol::TType _etype1210; - xfer += iprot->readListBegin(_etype1210, _size1207); - this->success.resize(_size1207); - uint32_t _i1211; - for (_i1211 = 0; _i1211 < _size1207; ++_i1211) + uint32_t _size1234; + ::apache::thrift::protocol::TType _etype1237; + xfer += iprot->readListBegin(_etype1237, _size1234); + this->success.resize(_size1234); + uint32_t _i1238; + for (_i1238 = 0; _i1238 < _size1234; ++_i1238) { - xfer += iprot->readString(this->success[_i1211]); + xfer += iprot->readString(this->success[_i1238]); } xfer += iprot->readListEnd(); } @@ -6850,10 +7101,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1212; - for (_iter1212 = this->success.begin(); _iter1212 != this->success.end(); ++_iter1212) + std::vector ::const_iterator _iter1239; + for (_iter1239 = this->success.begin(); _iter1239 != this->success.end(); ++_iter1239) { - xfer += oprot->writeString((*_iter1212)); + xfer += 
oprot->writeString((*_iter1239)); } xfer += oprot->writeListEnd(); } @@ -6898,14 +7149,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1213; - ::apache::thrift::protocol::TType _etype1216; - xfer += iprot->readListBegin(_etype1216, _size1213); - (*(this->success)).resize(_size1213); - uint32_t _i1217; - for (_i1217 = 0; _i1217 < _size1213; ++_i1217) + uint32_t _size1240; + ::apache::thrift::protocol::TType _etype1243; + xfer += iprot->readListBegin(_etype1243, _size1240); + (*(this->success)).resize(_size1240); + uint32_t _i1244; + for (_i1244 = 0; _i1244 < _size1240; ++_i1244) { - xfer += iprot->readString((*(this->success))[_i1217]); + xfer += iprot->readString((*(this->success))[_i1244]); } xfer += iprot->readListEnd(); } @@ -7075,14 +7326,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1218; - ::apache::thrift::protocol::TType _etype1221; - xfer += iprot->readListBegin(_etype1221, _size1218); - this->success.resize(_size1218); - uint32_t _i1222; - for (_i1222 = 0; _i1222 < _size1218; ++_i1222) + uint32_t _size1245; + ::apache::thrift::protocol::TType _etype1248; + xfer += iprot->readListBegin(_etype1248, _size1245); + this->success.resize(_size1245); + uint32_t _i1249; + for (_i1249 = 0; _i1249 < _size1245; ++_i1249) { - xfer += iprot->readString(this->success[_i1222]); + xfer += iprot->readString(this->success[_i1249]); } xfer += iprot->readListEnd(); } @@ -7121,10 +7372,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1223; - for (_iter1223 = this->success.begin(); _iter1223 != this->success.end(); ++_iter1223) + std::vector ::const_iterator _iter1250; + for (_iter1250 = this->success.begin(); _iter1250 != this->success.end(); ++_iter1250) { - xfer += oprot->writeString((*_iter1223)); + xfer += oprot->writeString((*_iter1250)); } xfer += oprot->writeListEnd(); } @@ -7169,14 +7420,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1224; - ::apache::thrift::protocol::TType _etype1227; - xfer += iprot->readListBegin(_etype1227, _size1224); - (*(this->success)).resize(_size1224); - uint32_t _i1228; - for (_i1228 = 0; _i1228 < _size1224; ++_i1228) + uint32_t _size1251; + ::apache::thrift::protocol::TType _etype1254; + xfer += iprot->readListBegin(_etype1254, _size1251); + (*(this->success)).resize(_size1251); + uint32_t _i1255; + for (_i1255 = 0; _i1255 < _size1251; ++_i1255) { - xfer += iprot->readString((*(this->success))[_i1228]); + xfer += iprot->readString((*(this->success))[_i1255]); } xfer += iprot->readListEnd(); } @@ -7314,14 +7565,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1229; - ::apache::thrift::protocol::TType _etype1232; - xfer += iprot->readListBegin(_etype1232, _size1229); - this->success.resize(_size1229); - uint32_t _i1233; - for (_i1233 = 0; _i1233 < _size1229; ++_i1233) + uint32_t 
_size1256; + ::apache::thrift::protocol::TType _etype1259; + xfer += iprot->readListBegin(_etype1259, _size1256); + this->success.resize(_size1256); + uint32_t _i1260; + for (_i1260 = 0; _i1260 < _size1256; ++_i1260) { - xfer += iprot->readString(this->success[_i1233]); + xfer += iprot->readString(this->success[_i1260]); } xfer += iprot->readListEnd(); } @@ -7360,10 +7611,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write( xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1234; - for (_iter1234 = this->success.begin(); _iter1234 != this->success.end(); ++_iter1234) + std::vector ::const_iterator _iter1261; + for (_iter1261 = this->success.begin(); _iter1261 != this->success.end(); ++_iter1261) { - xfer += oprot->writeString((*_iter1234)); + xfer += oprot->writeString((*_iter1261)); } xfer += oprot->writeListEnd(); } @@ -7408,14 +7659,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1235; - ::apache::thrift::protocol::TType _etype1238; - xfer += iprot->readListBegin(_etype1238, _size1235); - (*(this->success)).resize(_size1235); - uint32_t _i1239; - for (_i1239 = 0; _i1239 < _size1235; ++_i1239) + uint32_t _size1262; + ::apache::thrift::protocol::TType _etype1265; + xfer += iprot->readListBegin(_etype1265, _size1262); + (*(this->success)).resize(_size1262); + uint32_t _i1266; + for (_i1266 = 0; _i1266 < _size1262; ++_i1266) { - xfer += iprot->readString((*(this->success))[_i1239]); + xfer += iprot->readString((*(this->success))[_i1266]); } xfer += iprot->readListEnd(); } @@ -7490,14 +7741,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1240; - ::apache::thrift::protocol::TType _etype1243; - xfer += iprot->readListBegin(_etype1243, _size1240); - this->tbl_types.resize(_size1240); - uint32_t _i1244; - for (_i1244 = 0; _i1244 < _size1240; ++_i1244) + uint32_t _size1267; + ::apache::thrift::protocol::TType _etype1270; + xfer += iprot->readListBegin(_etype1270, _size1267); + this->tbl_types.resize(_size1267); + uint32_t _i1271; + for (_i1271 = 0; _i1271 < _size1267; ++_i1271) { - xfer += iprot->readString(this->tbl_types[_i1244]); + xfer += iprot->readString(this->tbl_types[_i1271]); } xfer += iprot->readListEnd(); } @@ -7534,10 +7785,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1245; - for (_iter1245 = this->tbl_types.begin(); _iter1245 != this->tbl_types.end(); ++_iter1245) + std::vector ::const_iterator _iter1272; + for (_iter1272 = this->tbl_types.begin(); _iter1272 != this->tbl_types.end(); ++_iter1272) { - xfer += oprot->writeString((*_iter1245)); + xfer += oprot->writeString((*_iter1272)); } xfer += oprot->writeListEnd(); } @@ -7569,10 +7820,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1246; - for (_iter1246 = (*(this->tbl_types)).begin(); _iter1246 != (*(this->tbl_types)).end(); ++_iter1246) + std::vector ::const_iterator _iter1273; + for (_iter1273 = (*(this->tbl_types)).begin(); _iter1273 != (*(this->tbl_types)).end(); ++_iter1273) { - xfer += oprot->writeString((*_iter1246)); + xfer += oprot->writeString((*_iter1273)); } xfer += oprot->writeListEnd(); } @@ -7613,14 +7864,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1247; - ::apache::thrift::protocol::TType _etype1250; - xfer += iprot->readListBegin(_etype1250, _size1247); - this->success.resize(_size1247); - uint32_t _i1251; - for (_i1251 = 0; _i1251 < _size1247; ++_i1251) + uint32_t _size1274; + ::apache::thrift::protocol::TType _etype1277; + xfer += iprot->readListBegin(_etype1277, _size1274); + this->success.resize(_size1274); + uint32_t _i1278; + for (_i1278 = 0; _i1278 < _size1274; ++_i1278) { - xfer += this->success[_i1251].read(iprot); + xfer += this->success[_i1278].read(iprot); } xfer += iprot->readListEnd(); } @@ -7659,10 +7910,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1252; - for (_iter1252 = this->success.begin(); _iter1252 != this->success.end(); ++_iter1252) + std::vector ::const_iterator _iter1279; + for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) { - xfer += (*_iter1252).write(oprot); + xfer += (*_iter1279).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7707,14 +7958,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1253; - ::apache::thrift::protocol::TType _etype1256; - xfer += iprot->readListBegin(_etype1256, _size1253); - (*(this->success)).resize(_size1253); - uint32_t _i1257; - for (_i1257 = 0; _i1257 < _size1253; ++_i1257) + uint32_t _size1280; + ::apache::thrift::protocol::TType _etype1283; + xfer += iprot->readListBegin(_etype1283, _size1280); + (*(this->success)).resize(_size1280); + uint32_t _i1284; + for (_i1284 = 0; _i1284 < _size1280; ++_i1284) { - xfer += (*(this->success))[_i1257].read(iprot); + xfer += (*(this->success))[_i1284].read(iprot); } xfer += iprot->readListEnd(); } @@ -7852,14 +8103,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1258; - ::apache::thrift::protocol::TType _etype1261; - xfer += iprot->readListBegin(_etype1261, _size1258); - this->success.resize(_size1258); - uint32_t _i1262; - for (_i1262 = 0; _i1262 < _size1258; ++_i1262) + uint32_t _size1285; + ::apache::thrift::protocol::TType _etype1288; + xfer += iprot->readListBegin(_etype1288, _size1285); + this->success.resize(_size1285); + uint32_t _i1289; + for (_i1289 = 0; _i1289 < _size1285; ++_i1289) { - xfer += iprot->readString(this->success[_i1262]); + xfer += iprot->readString(this->success[_i1289]); } xfer += iprot->readListEnd(); } @@ -7898,10 +8149,10 @@ uint32_t 
ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1263; - for (_iter1263 = this->success.begin(); _iter1263 != this->success.end(); ++_iter1263) + std::vector ::const_iterator _iter1290; + for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) { - xfer += oprot->writeString((*_iter1263)); + xfer += oprot->writeString((*_iter1290)); } xfer += oprot->writeListEnd(); } @@ -7946,14 +8197,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1264; - ::apache::thrift::protocol::TType _etype1267; - xfer += iprot->readListBegin(_etype1267, _size1264); - (*(this->success)).resize(_size1264); - uint32_t _i1268; - for (_i1268 = 0; _i1268 < _size1264; ++_i1268) + uint32_t _size1291; + ::apache::thrift::protocol::TType _etype1294; + xfer += iprot->readListBegin(_etype1294, _size1291); + (*(this->success)).resize(_size1291); + uint32_t _i1295; + for (_i1295 = 0; _i1295 < _size1291; ++_i1295) { - xfer += iprot->readString((*(this->success))[_i1268]); + xfer += iprot->readString((*(this->success))[_i1295]); } xfer += iprot->readListEnd(); } @@ -8263,14 +8514,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1269; - ::apache::thrift::protocol::TType _etype1272; - xfer += iprot->readListBegin(_etype1272, _size1269); - this->tbl_names.resize(_size1269); - uint32_t _i1273; - for (_i1273 = 0; _i1273 < _size1269; ++_i1273) + uint32_t _size1296; + ::apache::thrift::protocol::TType _etype1299; + xfer += iprot->readListBegin(_etype1299, _size1296); + this->tbl_names.resize(_size1296); + uint32_t _i1300; + for (_i1300 = 0; _i1300 < _size1296; ++_i1300) { - xfer += iprot->readString(this->tbl_names[_i1273]); + xfer += iprot->readString(this->tbl_names[_i1300]); } xfer += iprot->readListEnd(); } @@ -8303,10 +8554,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1274; - for (_iter1274 = this->tbl_names.begin(); _iter1274 != this->tbl_names.end(); ++_iter1274) + std::vector ::const_iterator _iter1301; + for (_iter1301 = this->tbl_names.begin(); _iter1301 != this->tbl_names.end(); ++_iter1301) { - xfer += oprot->writeString((*_iter1274)); + xfer += oprot->writeString((*_iter1301)); } xfer += oprot->writeListEnd(); } @@ -8334,10 +8585,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1275; - for (_iter1275 = (*(this->tbl_names)).begin(); _iter1275 != (*(this->tbl_names)).end(); ++_iter1275) + std::vector ::const_iterator _iter1302; + for (_iter1302 = (*(this->tbl_names)).begin(); _iter1302 != (*(this->tbl_names)).end(); ++_iter1302) { - xfer += 
oprot->writeString((*_iter1275)); + xfer += oprot->writeString((*_iter1302)); } xfer += oprot->writeListEnd(); } @@ -8378,14 +8629,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1276; - ::apache::thrift::protocol::TType _etype1279; - xfer += iprot->readListBegin(_etype1279, _size1276); - this->success.resize(_size1276); - uint32_t _i1280; - for (_i1280 = 0; _i1280 < _size1276; ++_i1280) + uint32_t _size1303; + ::apache::thrift::protocol::TType _etype1306; + xfer += iprot->readListBegin(_etype1306, _size1303); + this->success.resize(_size1303); + uint32_t _i1307; + for (_i1307 = 0; _i1307 < _size1303; ++_i1307) { - xfer += this->success[_i1280].read(iprot); + xfer += this->success[_i1307].read(iprot); } xfer += iprot->readListEnd(); } @@ -8416,10 +8667,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1281; - for (_iter1281 = this->success.begin(); _iter1281 != this->success.end(); ++_iter1281) + std::vector
::const_iterator _iter1308; + for (_iter1308 = this->success.begin(); _iter1308 != this->success.end(); ++_iter1308) { - xfer += (*_iter1281).write(oprot); + xfer += (*_iter1308).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8460,14 +8711,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1282; - ::apache::thrift::protocol::TType _etype1285; - xfer += iprot->readListBegin(_etype1285, _size1282); - (*(this->success)).resize(_size1282); - uint32_t _i1286; - for (_i1286 = 0; _i1286 < _size1282; ++_i1286) + uint32_t _size1309; + ::apache::thrift::protocol::TType _etype1312; + xfer += iprot->readListBegin(_etype1312, _size1309); + (*(this->success)).resize(_size1309); + uint32_t _i1313; + for (_i1313 = 0; _i1313 < _size1309; ++_i1313) { - xfer += (*(this->success))[_i1286].read(iprot); + xfer += (*(this->success))[_i1313].read(iprot); } xfer += iprot->readListEnd(); } @@ -9000,14 +9251,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1287; - ::apache::thrift::protocol::TType _etype1290; - xfer += iprot->readListBegin(_etype1290, _size1287); - this->tbl_names.resize(_size1287); - uint32_t _i1291; - for (_i1291 = 0; _i1291 < _size1287; ++_i1291) + uint32_t _size1314; + ::apache::thrift::protocol::TType _etype1317; + xfer += iprot->readListBegin(_etype1317, _size1314); + this->tbl_names.resize(_size1314); + uint32_t _i1318; + for (_i1318 = 0; _i1318 < _size1314; ++_i1318) { - xfer += iprot->readString(this->tbl_names[_i1291]); + xfer += iprot->readString(this->tbl_names[_i1318]); } xfer += iprot->readListEnd(); } @@ -9040,10 +9291,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(: xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1292; - for (_iter1292 = this->tbl_names.begin(); _iter1292 != this->tbl_names.end(); ++_iter1292) + std::vector ::const_iterator _iter1319; + for (_iter1319 = this->tbl_names.begin(); _iter1319 != this->tbl_names.end(); ++_iter1319) { - xfer += oprot->writeString((*_iter1292)); + xfer += oprot->writeString((*_iter1319)); } xfer += oprot->writeListEnd(); } @@ -9071,10 +9322,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write( xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1293; - for (_iter1293 = (*(this->tbl_names)).begin(); _iter1293 != (*(this->tbl_names)).end(); ++_iter1293) + std::vector ::const_iterator _iter1320; + for (_iter1320 = (*(this->tbl_names)).begin(); _iter1320 != (*(this->tbl_names)).end(); ++_iter1320) { - xfer += oprot->writeString((*_iter1293)); + xfer += oprot->writeString((*_iter1320)); } xfer += oprot->writeListEnd(); } @@ -9115,17 +9366,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read( if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1294; - ::apache::thrift::protocol::TType _ktype1295; - ::apache::thrift::protocol::TType _vtype1296; - xfer += iprot->readMapBegin(_ktype1295, 
_vtype1296, _size1294); - uint32_t _i1298; - for (_i1298 = 0; _i1298 < _size1294; ++_i1298) + uint32_t _size1321; + ::apache::thrift::protocol::TType _ktype1322; + ::apache::thrift::protocol::TType _vtype1323; + xfer += iprot->readMapBegin(_ktype1322, _vtype1323, _size1321); + uint32_t _i1325; + for (_i1325 = 0; _i1325 < _size1321; ++_i1325) { - std::string _key1299; - xfer += iprot->readString(_key1299); - Materialization& _val1300 = this->success[_key1299]; - xfer += _val1300.read(iprot); + std::string _key1326; + xfer += iprot->readString(_key1326); + Materialization& _val1327 = this->success[_key1326]; + xfer += _val1327.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9180,11 +9431,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1301; - for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) + std::map ::const_iterator _iter1328; + for (_iter1328 = this->success.begin(); _iter1328 != this->success.end(); ++_iter1328) { - xfer += oprot->writeString(_iter1301->first); - xfer += _iter1301->second.write(oprot); + xfer += oprot->writeString(_iter1328->first); + xfer += _iter1328->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -9237,17 +9488,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1302; - ::apache::thrift::protocol::TType _ktype1303; - ::apache::thrift::protocol::TType _vtype1304; - xfer += iprot->readMapBegin(_ktype1303, _vtype1304, _size1302); - uint32_t _i1306; - for (_i1306 = 0; _i1306 < _size1302; ++_i1306) + uint32_t _size1329; + ::apache::thrift::protocol::TType _ktype1330; + ::apache::thrift::protocol::TType _vtype1331; + xfer += iprot->readMapBegin(_ktype1330, _vtype1331, _size1329); + uint32_t _i1333; + for (_i1333 = 0; _i1333 < _size1329; ++_i1333) { - std::string _key1307; - xfer += iprot->readString(_key1307); - Materialization& _val1308 = (*(this->success))[_key1307]; - xfer += _val1308.read(iprot); + std::string _key1334; + xfer += iprot->readString(_key1334); + Materialization& _val1335 = (*(this->success))[_key1334]; + xfer += _val1335.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9692,14 +9943,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1309; - ::apache::thrift::protocol::TType _etype1312; - xfer += iprot->readListBegin(_etype1312, _size1309); - this->success.resize(_size1309); - uint32_t _i1313; - for (_i1313 = 0; _i1313 < _size1309; ++_i1313) + uint32_t _size1336; + ::apache::thrift::protocol::TType _etype1339; + xfer += iprot->readListBegin(_etype1339, _size1336); + this->success.resize(_size1336); + uint32_t _i1340; + for (_i1340 = 0; _i1340 < _size1336; ++_i1340) { - xfer += iprot->readString(this->success[_i1313]); + xfer += iprot->readString(this->success[_i1340]); } xfer += iprot->readListEnd(); } @@ -9754,10 +10005,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter1314; - for (_iter1314 = this->success.begin(); _iter1314 != this->success.end(); ++_iter1314) + std::vector ::const_iterator _iter1341; + for (_iter1341 = this->success.begin(); _iter1341 != this->success.end(); ++_iter1341) { - xfer += oprot->writeString((*_iter1314)); + xfer += oprot->writeString((*_iter1341)); } xfer += oprot->writeListEnd(); } @@ -9810,14 +10061,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1315; - ::apache::thrift::protocol::TType _etype1318; - xfer += iprot->readListBegin(_etype1318, _size1315); - (*(this->success)).resize(_size1315); - uint32_t _i1319; - for (_i1319 = 0; _i1319 < _size1315; ++_i1319) + uint32_t _size1342; + ::apache::thrift::protocol::TType _etype1345; + xfer += iprot->readListBegin(_etype1345, _size1342); + (*(this->success)).resize(_size1342); + uint32_t _i1346; + for (_i1346 = 0; _i1346 < _size1342; ++_i1346) { - xfer += iprot->readString((*(this->success))[_i1319]); + xfer += iprot->readString((*(this->success))[_i1346]); } xfer += iprot->readListEnd(); } @@ -11151,14 +11402,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1320; - ::apache::thrift::protocol::TType _etype1323; - xfer += iprot->readListBegin(_etype1323, _size1320); - this->new_parts.resize(_size1320); - uint32_t _i1324; - for (_i1324 = 0; _i1324 < _size1320; ++_i1324) + uint32_t _size1347; + ::apache::thrift::protocol::TType _etype1350; + xfer += iprot->readListBegin(_etype1350, _size1347); + this->new_parts.resize(_size1347); + uint32_t _i1351; + for (_i1351 = 0; _i1351 < _size1347; ++_i1351) { - xfer += this->new_parts[_i1324].read(iprot); + xfer += this->new_parts[_i1351].read(iprot); } xfer += iprot->readListEnd(); } @@ -11187,10 +11438,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1325; - for (_iter1325 = this->new_parts.begin(); _iter1325 != this->new_parts.end(); ++_iter1325) + std::vector ::const_iterator _iter1352; + for (_iter1352 = this->new_parts.begin(); _iter1352 != this->new_parts.end(); ++_iter1352) { - xfer += (*_iter1325).write(oprot); + xfer += (*_iter1352).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11214,10 +11465,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1326; - for (_iter1326 = (*(this->new_parts)).begin(); _iter1326 != (*(this->new_parts)).end(); ++_iter1326) + std::vector ::const_iterator _iter1353; + for (_iter1353 = (*(this->new_parts)).begin(); _iter1353 != (*(this->new_parts)).end(); ++_iter1353) { - xfer += (*_iter1326).write(oprot); + xfer += (*_iter1353).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11426,14 +11677,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->new_parts.clear(); - uint32_t _size1327; - ::apache::thrift::protocol::TType _etype1330; - xfer += iprot->readListBegin(_etype1330, _size1327); - this->new_parts.resize(_size1327); - uint32_t _i1331; - for (_i1331 = 0; _i1331 < _size1327; ++_i1331) + uint32_t _size1354; + ::apache::thrift::protocol::TType _etype1357; + xfer += iprot->readListBegin(_etype1357, _size1354); + this->new_parts.resize(_size1354); + uint32_t _i1358; + for (_i1358 = 0; _i1358 < _size1354; ++_i1358) { - xfer += this->new_parts[_i1331].read(iprot); + xfer += this->new_parts[_i1358].read(iprot); } xfer += iprot->readListEnd(); } @@ -11462,10 +11713,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1332; - for (_iter1332 = this->new_parts.begin(); _iter1332 != this->new_parts.end(); ++_iter1332) + std::vector ::const_iterator _iter1359; + for (_iter1359 = this->new_parts.begin(); _iter1359 != this->new_parts.end(); ++_iter1359) { - xfer += (*_iter1332).write(oprot); + xfer += (*_iter1359).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11489,10 +11740,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1333; - for (_iter1333 = (*(this->new_parts)).begin(); _iter1333 != (*(this->new_parts)).end(); ++_iter1333) + std::vector ::const_iterator _iter1360; + for (_iter1360 = (*(this->new_parts)).begin(); _iter1360 != (*(this->new_parts)).end(); ++_iter1360) { - xfer += (*_iter1333).write(oprot); + xfer += (*_iter1360).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11717,14 +11968,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1334; - ::apache::thrift::protocol::TType _etype1337; - xfer += iprot->readListBegin(_etype1337, _size1334); - this->part_vals.resize(_size1334); - uint32_t _i1338; - for (_i1338 = 0; _i1338 < _size1334; ++_i1338) + uint32_t _size1361; + ::apache::thrift::protocol::TType _etype1364; + xfer += iprot->readListBegin(_etype1364, _size1361); + this->part_vals.resize(_size1361); + uint32_t _i1365; + for (_i1365 = 0; _i1365 < _size1361; ++_i1365) { - xfer += iprot->readString(this->part_vals[_i1338]); + xfer += iprot->readString(this->part_vals[_i1365]); } xfer += iprot->readListEnd(); } @@ -11761,10 +12012,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1339; - for (_iter1339 = this->part_vals.begin(); _iter1339 != this->part_vals.end(); ++_iter1339) + std::vector ::const_iterator _iter1366; + for (_iter1366 = this->part_vals.begin(); _iter1366 != this->part_vals.end(); ++_iter1366) { - xfer += oprot->writeString((*_iter1339)); + xfer += oprot->writeString((*_iter1366)); } xfer += oprot->writeListEnd(); } @@ -11796,10 +12047,10 @@ uint32_t 
ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1340; - for (_iter1340 = (*(this->part_vals)).begin(); _iter1340 != (*(this->part_vals)).end(); ++_iter1340) + std::vector ::const_iterator _iter1367; + for (_iter1367 = (*(this->part_vals)).begin(); _iter1367 != (*(this->part_vals)).end(); ++_iter1367) { - xfer += oprot->writeString((*_iter1340)); + xfer += oprot->writeString((*_iter1367)); } xfer += oprot->writeListEnd(); } @@ -12271,14 +12522,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1341; - ::apache::thrift::protocol::TType _etype1344; - xfer += iprot->readListBegin(_etype1344, _size1341); - this->part_vals.resize(_size1341); - uint32_t _i1345; - for (_i1345 = 0; _i1345 < _size1341; ++_i1345) + uint32_t _size1368; + ::apache::thrift::protocol::TType _etype1371; + xfer += iprot->readListBegin(_etype1371, _size1368); + this->part_vals.resize(_size1368); + uint32_t _i1372; + for (_i1372 = 0; _i1372 < _size1368; ++_i1372) { - xfer += iprot->readString(this->part_vals[_i1345]); + xfer += iprot->readString(this->part_vals[_i1372]); } xfer += iprot->readListEnd(); } @@ -12323,10 +12574,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1346; - for (_iter1346 = this->part_vals.begin(); _iter1346 != this->part_vals.end(); ++_iter1346) + std::vector ::const_iterator _iter1373; + for (_iter1373 = this->part_vals.begin(); _iter1373 != this->part_vals.end(); ++_iter1373) { - xfer += oprot->writeString((*_iter1346)); + xfer += oprot->writeString((*_iter1373)); } xfer += oprot->writeListEnd(); } @@ -12362,10 +12613,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1347; - for (_iter1347 = (*(this->part_vals)).begin(); _iter1347 != (*(this->part_vals)).end(); ++_iter1347) + std::vector ::const_iterator _iter1374; + for (_iter1374 = (*(this->part_vals)).begin(); _iter1374 != (*(this->part_vals)).end(); ++_iter1374) { - xfer += oprot->writeString((*_iter1347)); + xfer += oprot->writeString((*_iter1374)); } xfer += oprot->writeListEnd(); } @@ -13168,14 +13419,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1348; - ::apache::thrift::protocol::TType _etype1351; - xfer += iprot->readListBegin(_etype1351, _size1348); - this->part_vals.resize(_size1348); - uint32_t _i1352; - for (_i1352 = 0; _i1352 < _size1348; ++_i1352) + uint32_t _size1375; + ::apache::thrift::protocol::TType _etype1378; + xfer += iprot->readListBegin(_etype1378, _size1375); + this->part_vals.resize(_size1375); + uint32_t _i1379; + for (_i1379 = 0; _i1379 < _size1375; ++_i1379) { - xfer += 
iprot->readString(this->part_vals[_i1352]); + xfer += iprot->readString(this->part_vals[_i1379]); } xfer += iprot->readListEnd(); } @@ -13220,10 +13471,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1353; - for (_iter1353 = this->part_vals.begin(); _iter1353 != this->part_vals.end(); ++_iter1353) + std::vector ::const_iterator _iter1380; + for (_iter1380 = this->part_vals.begin(); _iter1380 != this->part_vals.end(); ++_iter1380) { - xfer += oprot->writeString((*_iter1353)); + xfer += oprot->writeString((*_iter1380)); } xfer += oprot->writeListEnd(); } @@ -13259,10 +13510,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1354; - for (_iter1354 = (*(this->part_vals)).begin(); _iter1354 != (*(this->part_vals)).end(); ++_iter1354) + std::vector ::const_iterator _iter1381; + for (_iter1381 = (*(this->part_vals)).begin(); _iter1381 != (*(this->part_vals)).end(); ++_iter1381) { - xfer += oprot->writeString((*_iter1354)); + xfer += oprot->writeString((*_iter1381)); } xfer += oprot->writeListEnd(); } @@ -13471,14 +13722,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1355; - ::apache::thrift::protocol::TType _etype1358; - xfer += iprot->readListBegin(_etype1358, _size1355); - this->part_vals.resize(_size1355); - uint32_t _i1359; - for (_i1359 = 0; _i1359 < _size1355; ++_i1359) + uint32_t _size1382; + ::apache::thrift::protocol::TType _etype1385; + xfer += iprot->readListBegin(_etype1385, _size1382); + this->part_vals.resize(_size1382); + uint32_t _i1386; + for (_i1386 = 0; _i1386 < _size1382; ++_i1386) { - xfer += iprot->readString(this->part_vals[_i1359]); + xfer += iprot->readString(this->part_vals[_i1386]); } xfer += iprot->readListEnd(); } @@ -13531,10 +13782,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1360; - for (_iter1360 = this->part_vals.begin(); _iter1360 != this->part_vals.end(); ++_iter1360) + std::vector ::const_iterator _iter1387; + for (_iter1387 = this->part_vals.begin(); _iter1387 != this->part_vals.end(); ++_iter1387) { - xfer += oprot->writeString((*_iter1360)); + xfer += oprot->writeString((*_iter1387)); } xfer += oprot->writeListEnd(); } @@ -13574,10 +13825,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1361; - for (_iter1361 = (*(this->part_vals)).begin(); _iter1361 != (*(this->part_vals)).end(); ++_iter1361) + std::vector ::const_iterator _iter1388; + for (_iter1388 = 
(*(this->part_vals)).begin(); _iter1388 != (*(this->part_vals)).end(); ++_iter1388) { - xfer += oprot->writeString((*_iter1361)); + xfer += oprot->writeString((*_iter1388)); } xfer += oprot->writeListEnd(); } @@ -14583,14 +14834,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1362; - ::apache::thrift::protocol::TType _etype1365; - xfer += iprot->readListBegin(_etype1365, _size1362); - this->part_vals.resize(_size1362); - uint32_t _i1366; - for (_i1366 = 0; _i1366 < _size1362; ++_i1366) + uint32_t _size1389; + ::apache::thrift::protocol::TType _etype1392; + xfer += iprot->readListBegin(_etype1392, _size1389); + this->part_vals.resize(_size1389); + uint32_t _i1393; + for (_i1393 = 0; _i1393 < _size1389; ++_i1393) { - xfer += iprot->readString(this->part_vals[_i1366]); + xfer += iprot->readString(this->part_vals[_i1393]); } xfer += iprot->readListEnd(); } @@ -14627,10 +14878,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1367; - for (_iter1367 = this->part_vals.begin(); _iter1367 != this->part_vals.end(); ++_iter1367) + std::vector ::const_iterator _iter1394; + for (_iter1394 = this->part_vals.begin(); _iter1394 != this->part_vals.end(); ++_iter1394) { - xfer += oprot->writeString((*_iter1367)); + xfer += oprot->writeString((*_iter1394)); } xfer += oprot->writeListEnd(); } @@ -14662,10 +14913,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1368; - for (_iter1368 = (*(this->part_vals)).begin(); _iter1368 != (*(this->part_vals)).end(); ++_iter1368) + std::vector ::const_iterator _iter1395; + for (_iter1395 = (*(this->part_vals)).begin(); _iter1395 != (*(this->part_vals)).end(); ++_iter1395) { - xfer += oprot->writeString((*_iter1368)); + xfer += oprot->writeString((*_iter1395)); } xfer += oprot->writeListEnd(); } @@ -14854,17 +15105,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1369; - ::apache::thrift::protocol::TType _ktype1370; - ::apache::thrift::protocol::TType _vtype1371; - xfer += iprot->readMapBegin(_ktype1370, _vtype1371, _size1369); - uint32_t _i1373; - for (_i1373 = 0; _i1373 < _size1369; ++_i1373) + uint32_t _size1396; + ::apache::thrift::protocol::TType _ktype1397; + ::apache::thrift::protocol::TType _vtype1398; + xfer += iprot->readMapBegin(_ktype1397, _vtype1398, _size1396); + uint32_t _i1400; + for (_i1400 = 0; _i1400 < _size1396; ++_i1400) { - std::string _key1374; - xfer += iprot->readString(_key1374); - std::string& _val1375 = this->partitionSpecs[_key1374]; - xfer += iprot->readString(_val1375); + std::string _key1401; + xfer += iprot->readString(_key1401); + std::string& _val1402 = this->partitionSpecs[_key1401]; + xfer += iprot->readString(_val1402); } xfer += iprot->readMapEnd(); } @@ -14925,11 +15176,11 @@ uint32_t 
ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1376; - for (_iter1376 = this->partitionSpecs.begin(); _iter1376 != this->partitionSpecs.end(); ++_iter1376) + std::map ::const_iterator _iter1403; + for (_iter1403 = this->partitionSpecs.begin(); _iter1403 != this->partitionSpecs.end(); ++_iter1403) { - xfer += oprot->writeString(_iter1376->first); - xfer += oprot->writeString(_iter1376->second); + xfer += oprot->writeString(_iter1403->first); + xfer += oprot->writeString(_iter1403->second); } xfer += oprot->writeMapEnd(); } @@ -14969,11 +15220,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1377; - for (_iter1377 = (*(this->partitionSpecs)).begin(); _iter1377 != (*(this->partitionSpecs)).end(); ++_iter1377) + std::map ::const_iterator _iter1404; + for (_iter1404 = (*(this->partitionSpecs)).begin(); _iter1404 != (*(this->partitionSpecs)).end(); ++_iter1404) { - xfer += oprot->writeString(_iter1377->first); - xfer += oprot->writeString(_iter1377->second); + xfer += oprot->writeString(_iter1404->first); + xfer += oprot->writeString(_iter1404->second); } xfer += oprot->writeMapEnd(); } @@ -15218,17 +15469,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1378; - ::apache::thrift::protocol::TType _ktype1379; - ::apache::thrift::protocol::TType _vtype1380; - xfer += iprot->readMapBegin(_ktype1379, _vtype1380, _size1378); - uint32_t _i1382; - for (_i1382 = 0; _i1382 < _size1378; ++_i1382) + uint32_t _size1405; + ::apache::thrift::protocol::TType _ktype1406; + ::apache::thrift::protocol::TType _vtype1407; + xfer += iprot->readMapBegin(_ktype1406, _vtype1407, _size1405); + uint32_t _i1409; + for (_i1409 = 0; _i1409 < _size1405; ++_i1409) { - std::string _key1383; - xfer += iprot->readString(_key1383); - std::string& _val1384 = this->partitionSpecs[_key1383]; - xfer += iprot->readString(_val1384); + std::string _key1410; + xfer += iprot->readString(_key1410); + std::string& _val1411 = this->partitionSpecs[_key1410]; + xfer += iprot->readString(_val1411); } xfer += iprot->readMapEnd(); } @@ -15289,11 +15540,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1385; - for (_iter1385 = this->partitionSpecs.begin(); _iter1385 != this->partitionSpecs.end(); ++_iter1385) + std::map ::const_iterator _iter1412; + for (_iter1412 = this->partitionSpecs.begin(); _iter1412 != this->partitionSpecs.end(); ++_iter1412) { - xfer += oprot->writeString(_iter1385->first); - xfer += oprot->writeString(_iter1385->second); + xfer += oprot->writeString(_iter1412->first); + xfer += 
oprot->writeString(_iter1412->second); } xfer += oprot->writeMapEnd(); } @@ -15333,11 +15584,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1386; - for (_iter1386 = (*(this->partitionSpecs)).begin(); _iter1386 != (*(this->partitionSpecs)).end(); ++_iter1386) + std::map ::const_iterator _iter1413; + for (_iter1413 = (*(this->partitionSpecs)).begin(); _iter1413 != (*(this->partitionSpecs)).end(); ++_iter1413) { - xfer += oprot->writeString(_iter1386->first); - xfer += oprot->writeString(_iter1386->second); + xfer += oprot->writeString(_iter1413->first); + xfer += oprot->writeString(_iter1413->second); } xfer += oprot->writeMapEnd(); } @@ -15394,14 +15645,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1387; - ::apache::thrift::protocol::TType _etype1390; - xfer += iprot->readListBegin(_etype1390, _size1387); - this->success.resize(_size1387); - uint32_t _i1391; - for (_i1391 = 0; _i1391 < _size1387; ++_i1391) + uint32_t _size1414; + ::apache::thrift::protocol::TType _etype1417; + xfer += iprot->readListBegin(_etype1417, _size1414); + this->success.resize(_size1414); + uint32_t _i1418; + for (_i1418 = 0; _i1418 < _size1414; ++_i1418) { - xfer += this->success[_i1391].read(iprot); + xfer += this->success[_i1418].read(iprot); } xfer += iprot->readListEnd(); } @@ -15464,10 +15715,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1392; - for (_iter1392 = this->success.begin(); _iter1392 != this->success.end(); ++_iter1392) + std::vector ::const_iterator _iter1419; + for (_iter1419 = this->success.begin(); _iter1419 != this->success.end(); ++_iter1419) { - xfer += (*_iter1392).write(oprot); + xfer += (*_iter1419).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15524,14 +15775,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1393; - ::apache::thrift::protocol::TType _etype1396; - xfer += iprot->readListBegin(_etype1396, _size1393); - (*(this->success)).resize(_size1393); - uint32_t _i1397; - for (_i1397 = 0; _i1397 < _size1393; ++_i1397) + uint32_t _size1420; + ::apache::thrift::protocol::TType _etype1423; + xfer += iprot->readListBegin(_etype1423, _size1420); + (*(this->success)).resize(_size1420); + uint32_t _i1424; + for (_i1424 = 0; _i1424 < _size1420; ++_i1424) { - xfer += (*(this->success))[_i1397].read(iprot); + xfer += (*(this->success))[_i1424].read(iprot); } xfer += iprot->readListEnd(); } @@ -15630,14 +15881,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1398; - ::apache::thrift::protocol::TType _etype1401; - xfer += iprot->readListBegin(_etype1401, _size1398); - this->part_vals.resize(_size1398); - uint32_t _i1402; - for 
(_i1402 = 0; _i1402 < _size1398; ++_i1402) + uint32_t _size1425; + ::apache::thrift::protocol::TType _etype1428; + xfer += iprot->readListBegin(_etype1428, _size1425); + this->part_vals.resize(_size1425); + uint32_t _i1429; + for (_i1429 = 0; _i1429 < _size1425; ++_i1429) { - xfer += iprot->readString(this->part_vals[_i1402]); + xfer += iprot->readString(this->part_vals[_i1429]); } xfer += iprot->readListEnd(); } @@ -15658,14 +15909,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1403; - ::apache::thrift::protocol::TType _etype1406; - xfer += iprot->readListBegin(_etype1406, _size1403); - this->group_names.resize(_size1403); - uint32_t _i1407; - for (_i1407 = 0; _i1407 < _size1403; ++_i1407) + uint32_t _size1430; + ::apache::thrift::protocol::TType _etype1433; + xfer += iprot->readListBegin(_etype1433, _size1430); + this->group_names.resize(_size1430); + uint32_t _i1434; + for (_i1434 = 0; _i1434 < _size1430; ++_i1434) { - xfer += iprot->readString(this->group_names[_i1407]); + xfer += iprot->readString(this->group_names[_i1434]); } xfer += iprot->readListEnd(); } @@ -15702,10 +15953,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1408; - for (_iter1408 = this->part_vals.begin(); _iter1408 != this->part_vals.end(); ++_iter1408) + std::vector ::const_iterator _iter1435; + for (_iter1435 = this->part_vals.begin(); _iter1435 != this->part_vals.end(); ++_iter1435) { - xfer += oprot->writeString((*_iter1408)); + xfer += oprot->writeString((*_iter1435)); } xfer += oprot->writeListEnd(); } @@ -15718,10 +15969,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1409; - for (_iter1409 = this->group_names.begin(); _iter1409 != this->group_names.end(); ++_iter1409) + std::vector ::const_iterator _iter1436; + for (_iter1436 = this->group_names.begin(); _iter1436 != this->group_names.end(); ++_iter1436) { - xfer += oprot->writeString((*_iter1409)); + xfer += oprot->writeString((*_iter1436)); } xfer += oprot->writeListEnd(); } @@ -15753,10 +16004,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1410; - for (_iter1410 = (*(this->part_vals)).begin(); _iter1410 != (*(this->part_vals)).end(); ++_iter1410) + std::vector ::const_iterator _iter1437; + for (_iter1437 = (*(this->part_vals)).begin(); _iter1437 != (*(this->part_vals)).end(); ++_iter1437) { - xfer += oprot->writeString((*_iter1410)); + xfer += oprot->writeString((*_iter1437)); } xfer += oprot->writeListEnd(); } @@ -15769,10 +16020,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1411; - for (_iter1411 = (*(this->group_names)).begin(); _iter1411 != (*(this->group_names)).end(); ++_iter1411) + std::vector ::const_iterator _iter1438; + for (_iter1438 = (*(this->group_names)).begin(); _iter1438 != (*(this->group_names)).end(); ++_iter1438) { - xfer += oprot->writeString((*_iter1411)); + xfer += oprot->writeString((*_iter1438)); } xfer += oprot->writeListEnd(); } @@ -16331,14 +16582,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1412; - ::apache::thrift::protocol::TType _etype1415; - xfer += iprot->readListBegin(_etype1415, _size1412); - this->success.resize(_size1412); - uint32_t _i1416; - for (_i1416 = 0; _i1416 < _size1412; ++_i1416) + uint32_t _size1439; + ::apache::thrift::protocol::TType _etype1442; + xfer += iprot->readListBegin(_etype1442, _size1439); + this->success.resize(_size1439); + uint32_t _i1443; + for (_i1443 = 0; _i1443 < _size1439; ++_i1443) { - xfer += this->success[_i1416].read(iprot); + xfer += this->success[_i1443].read(iprot); } xfer += iprot->readListEnd(); } @@ -16385,10 +16636,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1417; - for (_iter1417 = this->success.begin(); _iter1417 != this->success.end(); ++_iter1417) + std::vector ::const_iterator _iter1444; + for (_iter1444 = this->success.begin(); _iter1444 != this->success.end(); ++_iter1444) { - xfer += (*_iter1417).write(oprot); + xfer += (*_iter1444).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16437,14 +16688,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1418; - ::apache::thrift::protocol::TType _etype1421; - xfer += iprot->readListBegin(_etype1421, _size1418); - (*(this->success)).resize(_size1418); - uint32_t _i1422; - for (_i1422 = 0; _i1422 < _size1418; ++_i1422) + uint32_t _size1445; + ::apache::thrift::protocol::TType _etype1448; + xfer += iprot->readListBegin(_etype1448, _size1445); + (*(this->success)).resize(_size1445); + uint32_t _i1449; + for (_i1449 = 0; _i1449 < _size1445; ++_i1449) { - xfer += (*(this->success))[_i1422].read(iprot); + xfer += (*(this->success))[_i1449].read(iprot); } xfer += iprot->readListEnd(); } @@ -16543,14 +16794,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1423; - ::apache::thrift::protocol::TType _etype1426; - xfer += iprot->readListBegin(_etype1426, _size1423); - this->group_names.resize(_size1423); - uint32_t _i1427; - for (_i1427 = 0; _i1427 < _size1423; ++_i1427) + uint32_t _size1450; + ::apache::thrift::protocol::TType _etype1453; + xfer += iprot->readListBegin(_etype1453, _size1450); + this->group_names.resize(_size1450); + uint32_t _i1454; + for (_i1454 = 0; _i1454 < _size1450; ++_i1454) { - xfer += iprot->readString(this->group_names[_i1427]); + xfer += iprot->readString(this->group_names[_i1454]); } xfer += iprot->readListEnd(); } @@ 
-16595,10 +16846,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1428; - for (_iter1428 = this->group_names.begin(); _iter1428 != this->group_names.end(); ++_iter1428) + std::vector ::const_iterator _iter1455; + for (_iter1455 = this->group_names.begin(); _iter1455 != this->group_names.end(); ++_iter1455) { - xfer += oprot->writeString((*_iter1428)); + xfer += oprot->writeString((*_iter1455)); } xfer += oprot->writeListEnd(); } @@ -16638,10 +16889,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1429; - for (_iter1429 = (*(this->group_names)).begin(); _iter1429 != (*(this->group_names)).end(); ++_iter1429) + std::vector ::const_iterator _iter1456; + for (_iter1456 = (*(this->group_names)).begin(); _iter1456 != (*(this->group_names)).end(); ++_iter1456) { - xfer += oprot->writeString((*_iter1429)); + xfer += oprot->writeString((*_iter1456)); } xfer += oprot->writeListEnd(); } @@ -16682,14 +16933,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1430; - ::apache::thrift::protocol::TType _etype1433; - xfer += iprot->readListBegin(_etype1433, _size1430); - this->success.resize(_size1430); - uint32_t _i1434; - for (_i1434 = 0; _i1434 < _size1430; ++_i1434) + uint32_t _size1457; + ::apache::thrift::protocol::TType _etype1460; + xfer += iprot->readListBegin(_etype1460, _size1457); + this->success.resize(_size1457); + uint32_t _i1461; + for (_i1461 = 0; _i1461 < _size1457; ++_i1461) { - xfer += this->success[_i1434].read(iprot); + xfer += this->success[_i1461].read(iprot); } xfer += iprot->readListEnd(); } @@ -16736,10 +16987,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1435; - for (_iter1435 = this->success.begin(); _iter1435 != this->success.end(); ++_iter1435) + std::vector ::const_iterator _iter1462; + for (_iter1462 = this->success.begin(); _iter1462 != this->success.end(); ++_iter1462) { - xfer += (*_iter1435).write(oprot); + xfer += (*_iter1462).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16788,14 +17039,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1436; - ::apache::thrift::protocol::TType _etype1439; - xfer += iprot->readListBegin(_etype1439, _size1436); - (*(this->success)).resize(_size1436); - uint32_t _i1440; - for (_i1440 = 0; _i1440 < _size1436; ++_i1440) + uint32_t _size1463; + ::apache::thrift::protocol::TType _etype1466; + xfer += iprot->readListBegin(_etype1466, _size1463); + (*(this->success)).resize(_size1463); + uint32_t _i1467; + for (_i1467 = 0; _i1467 < _size1463; ++_i1467) { - xfer += 
(*(this->success))[_i1440].read(iprot); + xfer += (*(this->success))[_i1467].read(iprot); } xfer += iprot->readListEnd(); } @@ -16973,14 +17224,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1441; - ::apache::thrift::protocol::TType _etype1444; - xfer += iprot->readListBegin(_etype1444, _size1441); - this->success.resize(_size1441); - uint32_t _i1445; - for (_i1445 = 0; _i1445 < _size1441; ++_i1445) + uint32_t _size1468; + ::apache::thrift::protocol::TType _etype1471; + xfer += iprot->readListBegin(_etype1471, _size1468); + this->success.resize(_size1468); + uint32_t _i1472; + for (_i1472 = 0; _i1472 < _size1468; ++_i1472) { - xfer += this->success[_i1445].read(iprot); + xfer += this->success[_i1472].read(iprot); } xfer += iprot->readListEnd(); } @@ -17027,10 +17278,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1446; - for (_iter1446 = this->success.begin(); _iter1446 != this->success.end(); ++_iter1446) + std::vector ::const_iterator _iter1473; + for (_iter1473 = this->success.begin(); _iter1473 != this->success.end(); ++_iter1473) { - xfer += (*_iter1446).write(oprot); + xfer += (*_iter1473).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17079,14 +17330,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1447; - ::apache::thrift::protocol::TType _etype1450; - xfer += iprot->readListBegin(_etype1450, _size1447); - (*(this->success)).resize(_size1447); - uint32_t _i1451; - for (_i1451 = 0; _i1451 < _size1447; ++_i1451) + uint32_t _size1474; + ::apache::thrift::protocol::TType _etype1477; + xfer += iprot->readListBegin(_etype1477, _size1474); + (*(this->success)).resize(_size1474); + uint32_t _i1478; + for (_i1478 = 0; _i1478 < _size1474; ++_i1478) { - xfer += (*(this->success))[_i1451].read(iprot); + xfer += (*(this->success))[_i1478].read(iprot); } xfer += iprot->readListEnd(); } @@ -17264,14 +17515,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1452; - ::apache::thrift::protocol::TType _etype1455; - xfer += iprot->readListBegin(_etype1455, _size1452); - this->success.resize(_size1452); - uint32_t _i1456; - for (_i1456 = 0; _i1456 < _size1452; ++_i1456) + uint32_t _size1479; + ::apache::thrift::protocol::TType _etype1482; + xfer += iprot->readListBegin(_etype1482, _size1479); + this->success.resize(_size1479); + uint32_t _i1483; + for (_i1483 = 0; _i1483 < _size1479; ++_i1483) { - xfer += iprot->readString(this->success[_i1456]); + xfer += iprot->readString(this->success[_i1483]); } xfer += iprot->readListEnd(); } @@ -17318,10 +17569,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1457; - for (_iter1457 = this->success.begin(); _iter1457 != this->success.end(); ++_iter1457) + 
std::vector ::const_iterator _iter1484; + for (_iter1484 = this->success.begin(); _iter1484 != this->success.end(); ++_iter1484) { - xfer += oprot->writeString((*_iter1457)); + xfer += oprot->writeString((*_iter1484)); } xfer += oprot->writeListEnd(); } @@ -17370,14 +17621,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1458; - ::apache::thrift::protocol::TType _etype1461; - xfer += iprot->readListBegin(_etype1461, _size1458); - (*(this->success)).resize(_size1458); - uint32_t _i1462; - for (_i1462 = 0; _i1462 < _size1458; ++_i1462) + uint32_t _size1485; + ::apache::thrift::protocol::TType _etype1488; + xfer += iprot->readListBegin(_etype1488, _size1485); + (*(this->success)).resize(_size1485); + uint32_t _i1489; + for (_i1489 = 0; _i1489 < _size1485; ++_i1489) { - xfer += iprot->readString((*(this->success))[_i1462]); + xfer += iprot->readString((*(this->success))[_i1489]); } xfer += iprot->readListEnd(); } @@ -17687,14 +17938,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1463; - ::apache::thrift::protocol::TType _etype1466; - xfer += iprot->readListBegin(_etype1466, _size1463); - this->part_vals.resize(_size1463); - uint32_t _i1467; - for (_i1467 = 0; _i1467 < _size1463; ++_i1467) + uint32_t _size1490; + ::apache::thrift::protocol::TType _etype1493; + xfer += iprot->readListBegin(_etype1493, _size1490); + this->part_vals.resize(_size1490); + uint32_t _i1494; + for (_i1494 = 0; _i1494 < _size1490; ++_i1494) { - xfer += iprot->readString(this->part_vals[_i1467]); + xfer += iprot->readString(this->part_vals[_i1494]); } xfer += iprot->readListEnd(); } @@ -17739,10 +17990,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1468; - for (_iter1468 = this->part_vals.begin(); _iter1468 != this->part_vals.end(); ++_iter1468) + std::vector ::const_iterator _iter1495; + for (_iter1495 = this->part_vals.begin(); _iter1495 != this->part_vals.end(); ++_iter1495) { - xfer += oprot->writeString((*_iter1468)); + xfer += oprot->writeString((*_iter1495)); } xfer += oprot->writeListEnd(); } @@ -17778,10 +18029,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1469; - for (_iter1469 = (*(this->part_vals)).begin(); _iter1469 != (*(this->part_vals)).end(); ++_iter1469) + std::vector ::const_iterator _iter1496; + for (_iter1496 = (*(this->part_vals)).begin(); _iter1496 != (*(this->part_vals)).end(); ++_iter1496) { - xfer += oprot->writeString((*_iter1469)); + xfer += oprot->writeString((*_iter1496)); } xfer += oprot->writeListEnd(); } @@ -17826,14 +18077,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1470; - ::apache::thrift::protocol::TType _etype1473; - xfer += 
iprot->readListBegin(_etype1473, _size1470); - this->success.resize(_size1470); - uint32_t _i1474; - for (_i1474 = 0; _i1474 < _size1470; ++_i1474) + uint32_t _size1497; + ::apache::thrift::protocol::TType _etype1500; + xfer += iprot->readListBegin(_etype1500, _size1497); + this->success.resize(_size1497); + uint32_t _i1501; + for (_i1501 = 0; _i1501 < _size1497; ++_i1501) { - xfer += this->success[_i1474].read(iprot); + xfer += this->success[_i1501].read(iprot); } xfer += iprot->readListEnd(); } @@ -17880,10 +18131,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1475; - for (_iter1475 = this->success.begin(); _iter1475 != this->success.end(); ++_iter1475) + std::vector ::const_iterator _iter1502; + for (_iter1502 = this->success.begin(); _iter1502 != this->success.end(); ++_iter1502) { - xfer += (*_iter1475).write(oprot); + xfer += (*_iter1502).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17932,14 +18183,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1476; - ::apache::thrift::protocol::TType _etype1479; - xfer += iprot->readListBegin(_etype1479, _size1476); - (*(this->success)).resize(_size1476); - uint32_t _i1480; - for (_i1480 = 0; _i1480 < _size1476; ++_i1480) + uint32_t _size1503; + ::apache::thrift::protocol::TType _etype1506; + xfer += iprot->readListBegin(_etype1506, _size1503); + (*(this->success)).resize(_size1503); + uint32_t _i1507; + for (_i1507 = 0; _i1507 < _size1503; ++_i1507) { - xfer += (*(this->success))[_i1480].read(iprot); + xfer += (*(this->success))[_i1507].read(iprot); } xfer += iprot->readListEnd(); } @@ -18022,14 +18273,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1481; - ::apache::thrift::protocol::TType _etype1484; - xfer += iprot->readListBegin(_etype1484, _size1481); - this->part_vals.resize(_size1481); - uint32_t _i1485; - for (_i1485 = 0; _i1485 < _size1481; ++_i1485) + uint32_t _size1508; + ::apache::thrift::protocol::TType _etype1511; + xfer += iprot->readListBegin(_etype1511, _size1508); + this->part_vals.resize(_size1508); + uint32_t _i1512; + for (_i1512 = 0; _i1512 < _size1508; ++_i1512) { - xfer += iprot->readString(this->part_vals[_i1485]); + xfer += iprot->readString(this->part_vals[_i1512]); } xfer += iprot->readListEnd(); } @@ -18058,14 +18309,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1486; - ::apache::thrift::protocol::TType _etype1489; - xfer += iprot->readListBegin(_etype1489, _size1486); - this->group_names.resize(_size1486); - uint32_t _i1490; - for (_i1490 = 0; _i1490 < _size1486; ++_i1490) + uint32_t _size1513; + ::apache::thrift::protocol::TType _etype1516; + xfer += iprot->readListBegin(_etype1516, _size1513); + this->group_names.resize(_size1513); + uint32_t _i1517; + for (_i1517 = 0; _i1517 < _size1513; ++_i1517) { - xfer += iprot->readString(this->group_names[_i1490]); + xfer += iprot->readString(this->group_names[_i1517]); } xfer += iprot->readListEnd(); } @@ 
-18102,10 +18353,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1491; - for (_iter1491 = this->part_vals.begin(); _iter1491 != this->part_vals.end(); ++_iter1491) + std::vector ::const_iterator _iter1518; + for (_iter1518 = this->part_vals.begin(); _iter1518 != this->part_vals.end(); ++_iter1518) { - xfer += oprot->writeString((*_iter1491)); + xfer += oprot->writeString((*_iter1518)); } xfer += oprot->writeListEnd(); } @@ -18122,10 +18373,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1492; - for (_iter1492 = this->group_names.begin(); _iter1492 != this->group_names.end(); ++_iter1492) + std::vector ::const_iterator _iter1519; + for (_iter1519 = this->group_names.begin(); _iter1519 != this->group_names.end(); ++_iter1519) { - xfer += oprot->writeString((*_iter1492)); + xfer += oprot->writeString((*_iter1519)); } xfer += oprot->writeListEnd(); } @@ -18157,10 +18408,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1493; - for (_iter1493 = (*(this->part_vals)).begin(); _iter1493 != (*(this->part_vals)).end(); ++_iter1493) + std::vector ::const_iterator _iter1520; + for (_iter1520 = (*(this->part_vals)).begin(); _iter1520 != (*(this->part_vals)).end(); ++_iter1520) { - xfer += oprot->writeString((*_iter1493)); + xfer += oprot->writeString((*_iter1520)); } xfer += oprot->writeListEnd(); } @@ -18177,10 +18428,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1494; - for (_iter1494 = (*(this->group_names)).begin(); _iter1494 != (*(this->group_names)).end(); ++_iter1494) + std::vector ::const_iterator _iter1521; + for (_iter1521 = (*(this->group_names)).begin(); _iter1521 != (*(this->group_names)).end(); ++_iter1521) { - xfer += oprot->writeString((*_iter1494)); + xfer += oprot->writeString((*_iter1521)); } xfer += oprot->writeListEnd(); } @@ -18221,14 +18472,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1495; - ::apache::thrift::protocol::TType _etype1498; - xfer += iprot->readListBegin(_etype1498, _size1495); - this->success.resize(_size1495); - uint32_t _i1499; - for (_i1499 = 0; _i1499 < _size1495; ++_i1499) + uint32_t _size1522; + ::apache::thrift::protocol::TType _etype1525; + xfer += iprot->readListBegin(_etype1525, _size1522); + this->success.resize(_size1522); + uint32_t _i1526; + for (_i1526 = 0; _i1526 < _size1522; ++_i1526) { - xfer += this->success[_i1499].read(iprot); + xfer += 
this->success[_i1526].read(iprot); } xfer += iprot->readListEnd(); } @@ -18275,10 +18526,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1500; - for (_iter1500 = this->success.begin(); _iter1500 != this->success.end(); ++_iter1500) + std::vector ::const_iterator _iter1527; + for (_iter1527 = this->success.begin(); _iter1527 != this->success.end(); ++_iter1527) { - xfer += (*_iter1500).write(oprot); + xfer += (*_iter1527).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18327,14 +18578,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1501; - ::apache::thrift::protocol::TType _etype1504; - xfer += iprot->readListBegin(_etype1504, _size1501); - (*(this->success)).resize(_size1501); - uint32_t _i1505; - for (_i1505 = 0; _i1505 < _size1501; ++_i1505) + uint32_t _size1528; + ::apache::thrift::protocol::TType _etype1531; + xfer += iprot->readListBegin(_etype1531, _size1528); + (*(this->success)).resize(_size1528); + uint32_t _i1532; + for (_i1532 = 0; _i1532 < _size1528; ++_i1532) { - xfer += (*(this->success))[_i1505].read(iprot); + xfer += (*(this->success))[_i1532].read(iprot); } xfer += iprot->readListEnd(); } @@ -18417,14 +18668,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1506; - ::apache::thrift::protocol::TType _etype1509; - xfer += iprot->readListBegin(_etype1509, _size1506); - this->part_vals.resize(_size1506); - uint32_t _i1510; - for (_i1510 = 0; _i1510 < _size1506; ++_i1510) + uint32_t _size1533; + ::apache::thrift::protocol::TType _etype1536; + xfer += iprot->readListBegin(_etype1536, _size1533); + this->part_vals.resize(_size1533); + uint32_t _i1537; + for (_i1537 = 0; _i1537 < _size1533; ++_i1537) { - xfer += iprot->readString(this->part_vals[_i1510]); + xfer += iprot->readString(this->part_vals[_i1537]); } xfer += iprot->readListEnd(); } @@ -18469,10 +18720,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1511; - for (_iter1511 = this->part_vals.begin(); _iter1511 != this->part_vals.end(); ++_iter1511) + std::vector ::const_iterator _iter1538; + for (_iter1538 = this->part_vals.begin(); _iter1538 != this->part_vals.end(); ++_iter1538) { - xfer += oprot->writeString((*_iter1511)); + xfer += oprot->writeString((*_iter1538)); } xfer += oprot->writeListEnd(); } @@ -18508,10 +18759,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1512; - for (_iter1512 = (*(this->part_vals)).begin(); _iter1512 != (*(this->part_vals)).end(); ++_iter1512) + std::vector ::const_iterator _iter1539; + for (_iter1539 = (*(this->part_vals)).begin(); 
_iter1539 != (*(this->part_vals)).end(); ++_iter1539) { - xfer += oprot->writeString((*_iter1512)); + xfer += oprot->writeString((*_iter1539)); } xfer += oprot->writeListEnd(); } @@ -18556,14 +18807,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1513; - ::apache::thrift::protocol::TType _etype1516; - xfer += iprot->readListBegin(_etype1516, _size1513); - this->success.resize(_size1513); - uint32_t _i1517; - for (_i1517 = 0; _i1517 < _size1513; ++_i1517) + uint32_t _size1540; + ::apache::thrift::protocol::TType _etype1543; + xfer += iprot->readListBegin(_etype1543, _size1540); + this->success.resize(_size1540); + uint32_t _i1544; + for (_i1544 = 0; _i1544 < _size1540; ++_i1544) { - xfer += iprot->readString(this->success[_i1517]); + xfer += iprot->readString(this->success[_i1544]); } xfer += iprot->readListEnd(); } @@ -18610,10 +18861,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1518; - for (_iter1518 = this->success.begin(); _iter1518 != this->success.end(); ++_iter1518) + std::vector ::const_iterator _iter1545; + for (_iter1545 = this->success.begin(); _iter1545 != this->success.end(); ++_iter1545) { - xfer += oprot->writeString((*_iter1518)); + xfer += oprot->writeString((*_iter1545)); } xfer += oprot->writeListEnd(); } @@ -18662,14 +18913,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1519; - ::apache::thrift::protocol::TType _etype1522; - xfer += iprot->readListBegin(_etype1522, _size1519); - (*(this->success)).resize(_size1519); - uint32_t _i1523; - for (_i1523 = 0; _i1523 < _size1519; ++_i1523) + uint32_t _size1546; + ::apache::thrift::protocol::TType _etype1549; + xfer += iprot->readListBegin(_etype1549, _size1546); + (*(this->success)).resize(_size1546); + uint32_t _i1550; + for (_i1550 = 0; _i1550 < _size1546; ++_i1550) { - xfer += iprot->readString((*(this->success))[_i1523]); + xfer += iprot->readString((*(this->success))[_i1550]); } xfer += iprot->readListEnd(); } @@ -18863,14 +19114,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1524; - ::apache::thrift::protocol::TType _etype1527; - xfer += iprot->readListBegin(_etype1527, _size1524); - this->success.resize(_size1524); - uint32_t _i1528; - for (_i1528 = 0; _i1528 < _size1524; ++_i1528) + uint32_t _size1551; + ::apache::thrift::protocol::TType _etype1554; + xfer += iprot->readListBegin(_etype1554, _size1551); + this->success.resize(_size1551); + uint32_t _i1555; + for (_i1555 = 0; _i1555 < _size1551; ++_i1555) { - xfer += this->success[_i1528].read(iprot); + xfer += this->success[_i1555].read(iprot); } xfer += iprot->readListEnd(); } @@ -18917,10 +19168,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1529; - for 
(_iter1529 = this->success.begin(); _iter1529 != this->success.end(); ++_iter1529) + std::vector ::const_iterator _iter1556; + for (_iter1556 = this->success.begin(); _iter1556 != this->success.end(); ++_iter1556) { - xfer += (*_iter1529).write(oprot); + xfer += (*_iter1556).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18969,14 +19220,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1530; - ::apache::thrift::protocol::TType _etype1533; - xfer += iprot->readListBegin(_etype1533, _size1530); - (*(this->success)).resize(_size1530); - uint32_t _i1534; - for (_i1534 = 0; _i1534 < _size1530; ++_i1534) + uint32_t _size1557; + ::apache::thrift::protocol::TType _etype1560; + xfer += iprot->readListBegin(_etype1560, _size1557); + (*(this->success)).resize(_size1557); + uint32_t _i1561; + for (_i1561 = 0; _i1561 < _size1557; ++_i1561) { - xfer += (*(this->success))[_i1534].read(iprot); + xfer += (*(this->success))[_i1561].read(iprot); } xfer += iprot->readListEnd(); } @@ -19170,14 +19421,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1535; - ::apache::thrift::protocol::TType _etype1538; - xfer += iprot->readListBegin(_etype1538, _size1535); - this->success.resize(_size1535); - uint32_t _i1539; - for (_i1539 = 0; _i1539 < _size1535; ++_i1539) + uint32_t _size1562; + ::apache::thrift::protocol::TType _etype1565; + xfer += iprot->readListBegin(_etype1565, _size1562); + this->success.resize(_size1562); + uint32_t _i1566; + for (_i1566 = 0; _i1566 < _size1562; ++_i1566) { - xfer += this->success[_i1539].read(iprot); + xfer += this->success[_i1566].read(iprot); } xfer += iprot->readListEnd(); } @@ -19224,10 +19475,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1540; - for (_iter1540 = this->success.begin(); _iter1540 != this->success.end(); ++_iter1540) + std::vector ::const_iterator _iter1567; + for (_iter1567 = this->success.begin(); _iter1567 != this->success.end(); ++_iter1567) { - xfer += (*_iter1540).write(oprot); + xfer += (*_iter1567).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19276,14 +19527,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1541; - ::apache::thrift::protocol::TType _etype1544; - xfer += iprot->readListBegin(_etype1544, _size1541); - (*(this->success)).resize(_size1541); - uint32_t _i1545; - for (_i1545 = 0; _i1545 < _size1541; ++_i1545) + uint32_t _size1568; + ::apache::thrift::protocol::TType _etype1571; + xfer += iprot->readListBegin(_etype1571, _size1568); + (*(this->success)).resize(_size1568); + uint32_t _i1572; + for (_i1572 = 0; _i1572 < _size1568; ++_i1572) { - xfer += (*(this->success))[_i1545].read(iprot); + xfer += (*(this->success))[_i1572].read(iprot); } xfer += iprot->readListEnd(); } @@ -19852,14 +20103,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1546; - 
::apache::thrift::protocol::TType _etype1549; - xfer += iprot->readListBegin(_etype1549, _size1546); - this->names.resize(_size1546); - uint32_t _i1550; - for (_i1550 = 0; _i1550 < _size1546; ++_i1550) + uint32_t _size1573; + ::apache::thrift::protocol::TType _etype1576; + xfer += iprot->readListBegin(_etype1576, _size1573); + this->names.resize(_size1573); + uint32_t _i1577; + for (_i1577 = 0; _i1577 < _size1573; ++_i1577) { - xfer += iprot->readString(this->names[_i1550]); + xfer += iprot->readString(this->names[_i1577]); } xfer += iprot->readListEnd(); } @@ -19896,10 +20147,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1551; - for (_iter1551 = this->names.begin(); _iter1551 != this->names.end(); ++_iter1551) + std::vector ::const_iterator _iter1578; + for (_iter1578 = this->names.begin(); _iter1578 != this->names.end(); ++_iter1578) { - xfer += oprot->writeString((*_iter1551)); + xfer += oprot->writeString((*_iter1578)); } xfer += oprot->writeListEnd(); } @@ -19931,10 +20182,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1552; - for (_iter1552 = (*(this->names)).begin(); _iter1552 != (*(this->names)).end(); ++_iter1552) + std::vector ::const_iterator _iter1579; + for (_iter1579 = (*(this->names)).begin(); _iter1579 != (*(this->names)).end(); ++_iter1579) { - xfer += oprot->writeString((*_iter1552)); + xfer += oprot->writeString((*_iter1579)); } xfer += oprot->writeListEnd(); } @@ -19975,14 +20226,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1553; - ::apache::thrift::protocol::TType _etype1556; - xfer += iprot->readListBegin(_etype1556, _size1553); - this->success.resize(_size1553); - uint32_t _i1557; - for (_i1557 = 0; _i1557 < _size1553; ++_i1557) + uint32_t _size1580; + ::apache::thrift::protocol::TType _etype1583; + xfer += iprot->readListBegin(_etype1583, _size1580); + this->success.resize(_size1580); + uint32_t _i1584; + for (_i1584 = 0; _i1584 < _size1580; ++_i1584) { - xfer += this->success[_i1557].read(iprot); + xfer += this->success[_i1584].read(iprot); } xfer += iprot->readListEnd(); } @@ -20029,10 +20280,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1558; - for (_iter1558 = this->success.begin(); _iter1558 != this->success.end(); ++_iter1558) + std::vector ::const_iterator _iter1585; + for (_iter1585 = this->success.begin(); _iter1585 != this->success.end(); ++_iter1585) { - xfer += (*_iter1558).write(oprot); + xfer += (*_iter1585).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20081,14 +20332,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - 
uint32_t _size1559; - ::apache::thrift::protocol::TType _etype1562; - xfer += iprot->readListBegin(_etype1562, _size1559); - (*(this->success)).resize(_size1559); - uint32_t _i1563; - for (_i1563 = 0; _i1563 < _size1559; ++_i1563) + uint32_t _size1586; + ::apache::thrift::protocol::TType _etype1589; + xfer += iprot->readListBegin(_etype1589, _size1586); + (*(this->success)).resize(_size1586); + uint32_t _i1590; + for (_i1590 = 0; _i1590 < _size1586; ++_i1590) { - xfer += (*(this->success))[_i1563].read(iprot); + xfer += (*(this->success))[_i1590].read(iprot); } xfer += iprot->readListEnd(); } @@ -20410,14 +20661,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1564; - ::apache::thrift::protocol::TType _etype1567; - xfer += iprot->readListBegin(_etype1567, _size1564); - this->new_parts.resize(_size1564); - uint32_t _i1568; - for (_i1568 = 0; _i1568 < _size1564; ++_i1568) + uint32_t _size1591; + ::apache::thrift::protocol::TType _etype1594; + xfer += iprot->readListBegin(_etype1594, _size1591); + this->new_parts.resize(_size1591); + uint32_t _i1595; + for (_i1595 = 0; _i1595 < _size1591; ++_i1595) { - xfer += this->new_parts[_i1568].read(iprot); + xfer += this->new_parts[_i1595].read(iprot); } xfer += iprot->readListEnd(); } @@ -20454,10 +20705,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1569; - for (_iter1569 = this->new_parts.begin(); _iter1569 != this->new_parts.end(); ++_iter1569) + std::vector ::const_iterator _iter1596; + for (_iter1596 = this->new_parts.begin(); _iter1596 != this->new_parts.end(); ++_iter1596) { - xfer += (*_iter1569).write(oprot); + xfer += (*_iter1596).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20489,10 +20740,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1570; - for (_iter1570 = (*(this->new_parts)).begin(); _iter1570 != (*(this->new_parts)).end(); ++_iter1570) + std::vector ::const_iterator _iter1597; + for (_iter1597 = (*(this->new_parts)).begin(); _iter1597 != (*(this->new_parts)).end(); ++_iter1597) { - xfer += (*_iter1570).write(oprot); + xfer += (*_iter1597).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20677,14 +20928,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1571; - ::apache::thrift::protocol::TType _etype1574; - xfer += iprot->readListBegin(_etype1574, _size1571); - this->new_parts.resize(_size1571); - uint32_t _i1575; - for (_i1575 = 0; _i1575 < _size1571; ++_i1575) + uint32_t _size1598; + ::apache::thrift::protocol::TType _etype1601; + xfer += iprot->readListBegin(_etype1601, _size1598); + this->new_parts.resize(_size1598); + uint32_t _i1602; + for (_i1602 = 0; _i1602 < _size1598; ++_i1602) { - xfer += this->new_parts[_i1575].read(iprot); + xfer += this->new_parts[_i1602].read(iprot); } xfer += iprot->readListEnd(); } @@ -20729,10 
+20980,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1576; - for (_iter1576 = this->new_parts.begin(); _iter1576 != this->new_parts.end(); ++_iter1576) + std::vector ::const_iterator _iter1603; + for (_iter1603 = this->new_parts.begin(); _iter1603 != this->new_parts.end(); ++_iter1603) { - xfer += (*_iter1576).write(oprot); + xfer += (*_iter1603).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20768,10 +21019,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1577; - for (_iter1577 = (*(this->new_parts)).begin(); _iter1577 != (*(this->new_parts)).end(); ++_iter1577) + std::vector ::const_iterator _iter1604; + for (_iter1604 = (*(this->new_parts)).begin(); _iter1604 != (*(this->new_parts)).end(); ++_iter1604) { - xfer += (*_iter1577).write(oprot); + xfer += (*_iter1604).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21215,14 +21466,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1578; - ::apache::thrift::protocol::TType _etype1581; - xfer += iprot->readListBegin(_etype1581, _size1578); - this->part_vals.resize(_size1578); - uint32_t _i1582; - for (_i1582 = 0; _i1582 < _size1578; ++_i1582) + uint32_t _size1605; + ::apache::thrift::protocol::TType _etype1608; + xfer += iprot->readListBegin(_etype1608, _size1605); + this->part_vals.resize(_size1605); + uint32_t _i1609; + for (_i1609 = 0; _i1609 < _size1605; ++_i1609) { - xfer += iprot->readString(this->part_vals[_i1582]); + xfer += iprot->readString(this->part_vals[_i1609]); } xfer += iprot->readListEnd(); } @@ -21267,10 +21518,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1583; - for (_iter1583 = this->part_vals.begin(); _iter1583 != this->part_vals.end(); ++_iter1583) + std::vector ::const_iterator _iter1610; + for (_iter1610 = this->part_vals.begin(); _iter1610 != this->part_vals.end(); ++_iter1610) { - xfer += oprot->writeString((*_iter1583)); + xfer += oprot->writeString((*_iter1610)); } xfer += oprot->writeListEnd(); } @@ -21306,10 +21557,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1584; - for (_iter1584 = (*(this->part_vals)).begin(); _iter1584 != (*(this->part_vals)).end(); ++_iter1584) + std::vector ::const_iterator _iter1611; + for (_iter1611 = (*(this->part_vals)).begin(); _iter1611 != (*(this->part_vals)).end(); ++_iter1611) { - xfer += oprot->writeString((*_iter1584)); + xfer += oprot->writeString((*_iter1611)); } xfer += 
oprot->writeListEnd(); } @@ -21482,14 +21733,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1585; - ::apache::thrift::protocol::TType _etype1588; - xfer += iprot->readListBegin(_etype1588, _size1585); - this->part_vals.resize(_size1585); - uint32_t _i1589; - for (_i1589 = 0; _i1589 < _size1585; ++_i1589) + uint32_t _size1612; + ::apache::thrift::protocol::TType _etype1615; + xfer += iprot->readListBegin(_etype1615, _size1612); + this->part_vals.resize(_size1612); + uint32_t _i1616; + for (_i1616 = 0; _i1616 < _size1612; ++_i1616) { - xfer += iprot->readString(this->part_vals[_i1589]); + xfer += iprot->readString(this->part_vals[_i1616]); } xfer += iprot->readListEnd(); } @@ -21526,10 +21777,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1590; - for (_iter1590 = this->part_vals.begin(); _iter1590 != this->part_vals.end(); ++_iter1590) + std::vector ::const_iterator _iter1617; + for (_iter1617 = this->part_vals.begin(); _iter1617 != this->part_vals.end(); ++_iter1617) { - xfer += oprot->writeString((*_iter1590)); + xfer += oprot->writeString((*_iter1617)); } xfer += oprot->writeListEnd(); } @@ -21557,10 +21808,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1591; - for (_iter1591 = (*(this->part_vals)).begin(); _iter1591 != (*(this->part_vals)).end(); ++_iter1591) + std::vector ::const_iterator _iter1618; + for (_iter1618 = (*(this->part_vals)).begin(); _iter1618 != (*(this->part_vals)).end(); ++_iter1618) { - xfer += oprot->writeString((*_iter1591)); + xfer += oprot->writeString((*_iter1618)); } xfer += oprot->writeListEnd(); } @@ -22035,14 +22286,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1592; - ::apache::thrift::protocol::TType _etype1595; - xfer += iprot->readListBegin(_etype1595, _size1592); - this->success.resize(_size1592); - uint32_t _i1596; - for (_i1596 = 0; _i1596 < _size1592; ++_i1596) + uint32_t _size1619; + ::apache::thrift::protocol::TType _etype1622; + xfer += iprot->readListBegin(_etype1622, _size1619); + this->success.resize(_size1619); + uint32_t _i1623; + for (_i1623 = 0; _i1623 < _size1619; ++_i1623) { - xfer += iprot->readString(this->success[_i1596]); + xfer += iprot->readString(this->success[_i1623]); } xfer += iprot->readListEnd(); } @@ -22081,10 +22332,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1597; - for (_iter1597 = this->success.begin(); _iter1597 != this->success.end(); ++_iter1597) + std::vector ::const_iterator _iter1624; + for (_iter1624 = this->success.begin(); _iter1624 != this->success.end(); 
++_iter1624) { - xfer += oprot->writeString((*_iter1597)); + xfer += oprot->writeString((*_iter1624)); } xfer += oprot->writeListEnd(); } @@ -22129,14 +22380,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1598; - ::apache::thrift::protocol::TType _etype1601; - xfer += iprot->readListBegin(_etype1601, _size1598); - (*(this->success)).resize(_size1598); - uint32_t _i1602; - for (_i1602 = 0; _i1602 < _size1598; ++_i1602) + uint32_t _size1625; + ::apache::thrift::protocol::TType _etype1628; + xfer += iprot->readListBegin(_etype1628, _size1625); + (*(this->success)).resize(_size1625); + uint32_t _i1629; + for (_i1629 = 0; _i1629 < _size1625; ++_i1629) { - xfer += iprot->readString((*(this->success))[_i1602]); + xfer += iprot->readString((*(this->success))[_i1629]); } xfer += iprot->readListEnd(); } @@ -22274,17 +22525,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1603; - ::apache::thrift::protocol::TType _ktype1604; - ::apache::thrift::protocol::TType _vtype1605; - xfer += iprot->readMapBegin(_ktype1604, _vtype1605, _size1603); - uint32_t _i1607; - for (_i1607 = 0; _i1607 < _size1603; ++_i1607) + uint32_t _size1630; + ::apache::thrift::protocol::TType _ktype1631; + ::apache::thrift::protocol::TType _vtype1632; + xfer += iprot->readMapBegin(_ktype1631, _vtype1632, _size1630); + uint32_t _i1634; + for (_i1634 = 0; _i1634 < _size1630; ++_i1634) { - std::string _key1608; - xfer += iprot->readString(_key1608); - std::string& _val1609 = this->success[_key1608]; - xfer += iprot->readString(_val1609); + std::string _key1635; + xfer += iprot->readString(_key1635); + std::string& _val1636 = this->success[_key1635]; + xfer += iprot->readString(_val1636); } xfer += iprot->readMapEnd(); } @@ -22323,11 +22574,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1610; - for (_iter1610 = this->success.begin(); _iter1610 != this->success.end(); ++_iter1610) + std::map ::const_iterator _iter1637; + for (_iter1637 = this->success.begin(); _iter1637 != this->success.end(); ++_iter1637) { - xfer += oprot->writeString(_iter1610->first); - xfer += oprot->writeString(_iter1610->second); + xfer += oprot->writeString(_iter1637->first); + xfer += oprot->writeString(_iter1637->second); } xfer += oprot->writeMapEnd(); } @@ -22372,17 +22623,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1611; - ::apache::thrift::protocol::TType _ktype1612; - ::apache::thrift::protocol::TType _vtype1613; - xfer += iprot->readMapBegin(_ktype1612, _vtype1613, _size1611); - uint32_t _i1615; - for (_i1615 = 0; _i1615 < _size1611; ++_i1615) + uint32_t _size1638; + ::apache::thrift::protocol::TType _ktype1639; + ::apache::thrift::protocol::TType _vtype1640; + xfer += iprot->readMapBegin(_ktype1639, _vtype1640, _size1638); + uint32_t _i1642; + for (_i1642 = 0; _i1642 < _size1638; ++_i1642) { - std::string _key1616; - xfer += iprot->readString(_key1616); - 
std::string& _val1617 = (*(this->success))[_key1616]; - xfer += iprot->readString(_val1617); + std::string _key1643; + xfer += iprot->readString(_key1643); + std::string& _val1644 = (*(this->success))[_key1643]; + xfer += iprot->readString(_val1644); } xfer += iprot->readMapEnd(); } @@ -22457,17 +22708,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1618; - ::apache::thrift::protocol::TType _ktype1619; - ::apache::thrift::protocol::TType _vtype1620; - xfer += iprot->readMapBegin(_ktype1619, _vtype1620, _size1618); - uint32_t _i1622; - for (_i1622 = 0; _i1622 < _size1618; ++_i1622) + uint32_t _size1645; + ::apache::thrift::protocol::TType _ktype1646; + ::apache::thrift::protocol::TType _vtype1647; + xfer += iprot->readMapBegin(_ktype1646, _vtype1647, _size1645); + uint32_t _i1649; + for (_i1649 = 0; _i1649 < _size1645; ++_i1649) { - std::string _key1623; - xfer += iprot->readString(_key1623); - std::string& _val1624 = this->part_vals[_key1623]; - xfer += iprot->readString(_val1624); + std::string _key1650; + xfer += iprot->readString(_key1650); + std::string& _val1651 = this->part_vals[_key1650]; + xfer += iprot->readString(_val1651); } xfer += iprot->readMapEnd(); } @@ -22478,9 +22729,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1625; - xfer += iprot->readI32(ecast1625); - this->eventType = (PartitionEventType::type)ecast1625; + int32_t ecast1652; + xfer += iprot->readI32(ecast1652); + this->eventType = (PartitionEventType::type)ecast1652; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22514,11 +22765,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1626; - for (_iter1626 = this->part_vals.begin(); _iter1626 != this->part_vals.end(); ++_iter1626) + std::map ::const_iterator _iter1653; + for (_iter1653 = this->part_vals.begin(); _iter1653 != this->part_vals.end(); ++_iter1653) { - xfer += oprot->writeString(_iter1626->first); - xfer += oprot->writeString(_iter1626->second); + xfer += oprot->writeString(_iter1653->first); + xfer += oprot->writeString(_iter1653->second); } xfer += oprot->writeMapEnd(); } @@ -22554,11 +22805,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1627; - for (_iter1627 = (*(this->part_vals)).begin(); _iter1627 != (*(this->part_vals)).end(); ++_iter1627) + std::map ::const_iterator _iter1654; + for (_iter1654 = (*(this->part_vals)).begin(); _iter1654 != (*(this->part_vals)).end(); ++_iter1654) { - xfer += oprot->writeString(_iter1627->first); - xfer += oprot->writeString(_iter1627->second); + xfer += oprot->writeString(_iter1654->first); + xfer += oprot->writeString(_iter1654->second); } xfer += oprot->writeMapEnd(); } @@ -22827,17 +23078,17 @@ uint32_t 
ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1628; - ::apache::thrift::protocol::TType _ktype1629; - ::apache::thrift::protocol::TType _vtype1630; - xfer += iprot->readMapBegin(_ktype1629, _vtype1630, _size1628); - uint32_t _i1632; - for (_i1632 = 0; _i1632 < _size1628; ++_i1632) + uint32_t _size1655; + ::apache::thrift::protocol::TType _ktype1656; + ::apache::thrift::protocol::TType _vtype1657; + xfer += iprot->readMapBegin(_ktype1656, _vtype1657, _size1655); + uint32_t _i1659; + for (_i1659 = 0; _i1659 < _size1655; ++_i1659) { - std::string _key1633; - xfer += iprot->readString(_key1633); - std::string& _val1634 = this->part_vals[_key1633]; - xfer += iprot->readString(_val1634); + std::string _key1660; + xfer += iprot->readString(_key1660); + std::string& _val1661 = this->part_vals[_key1660]; + xfer += iprot->readString(_val1661); } xfer += iprot->readMapEnd(); } @@ -22848,9 +23099,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1635; - xfer += iprot->readI32(ecast1635); - this->eventType = (PartitionEventType::type)ecast1635; + int32_t ecast1662; + xfer += iprot->readI32(ecast1662); + this->eventType = (PartitionEventType::type)ecast1662; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22884,11 +23135,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1636; - for (_iter1636 = this->part_vals.begin(); _iter1636 != this->part_vals.end(); ++_iter1636) + std::map ::const_iterator _iter1663; + for (_iter1663 = this->part_vals.begin(); _iter1663 != this->part_vals.end(); ++_iter1663) { - xfer += oprot->writeString(_iter1636->first); - xfer += oprot->writeString(_iter1636->second); + xfer += oprot->writeString(_iter1663->first); + xfer += oprot->writeString(_iter1663->second); } xfer += oprot->writeMapEnd(); } @@ -22924,11 +23175,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1637; - for (_iter1637 = (*(this->part_vals)).begin(); _iter1637 != (*(this->part_vals)).end(); ++_iter1637) + std::map ::const_iterator _iter1664; + for (_iter1664 = (*(this->part_vals)).begin(); _iter1664 != (*(this->part_vals)).end(); ++_iter1664) { - xfer += oprot->writeString(_iter1637->first); - xfer += oprot->writeString(_iter1637->second); + xfer += oprot->writeString(_iter1664->first); + xfer += oprot->writeString(_iter1664->second); } xfer += oprot->writeMapEnd(); } @@ -24364,14 +24615,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1638; - ::apache::thrift::protocol::TType _etype1641; - xfer += iprot->readListBegin(_etype1641, _size1638); - this->success.resize(_size1638); - uint32_t _i1642; - for (_i1642 = 0; _i1642 < _size1638; 
++_i1642) + uint32_t _size1665; + ::apache::thrift::protocol::TType _etype1668; + xfer += iprot->readListBegin(_etype1668, _size1665); + this->success.resize(_size1665); + uint32_t _i1669; + for (_i1669 = 0; _i1669 < _size1665; ++_i1669) { - xfer += this->success[_i1642].read(iprot); + xfer += this->success[_i1669].read(iprot); } xfer += iprot->readListEnd(); } @@ -24418,10 +24669,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1643; - for (_iter1643 = this->success.begin(); _iter1643 != this->success.end(); ++_iter1643) + std::vector ::const_iterator _iter1670; + for (_iter1670 = this->success.begin(); _iter1670 != this->success.end(); ++_iter1670) { - xfer += (*_iter1643).write(oprot); + xfer += (*_iter1670).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24470,14 +24721,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1644; - ::apache::thrift::protocol::TType _etype1647; - xfer += iprot->readListBegin(_etype1647, _size1644); - (*(this->success)).resize(_size1644); - uint32_t _i1648; - for (_i1648 = 0; _i1648 < _size1644; ++_i1648) + uint32_t _size1671; + ::apache::thrift::protocol::TType _etype1674; + xfer += iprot->readListBegin(_etype1674, _size1671); + (*(this->success)).resize(_size1671); + uint32_t _i1675; + for (_i1675 = 0; _i1675 < _size1671; ++_i1675) { - xfer += (*(this->success))[_i1648].read(iprot); + xfer += (*(this->success))[_i1675].read(iprot); } xfer += iprot->readListEnd(); } @@ -24655,14 +24906,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1649; - ::apache::thrift::protocol::TType _etype1652; - xfer += iprot->readListBegin(_etype1652, _size1649); - this->success.resize(_size1649); - uint32_t _i1653; - for (_i1653 = 0; _i1653 < _size1649; ++_i1653) + uint32_t _size1676; + ::apache::thrift::protocol::TType _etype1679; + xfer += iprot->readListBegin(_etype1679, _size1676); + this->success.resize(_size1676); + uint32_t _i1680; + for (_i1680 = 0; _i1680 < _size1676; ++_i1680) { - xfer += iprot->readString(this->success[_i1653]); + xfer += iprot->readString(this->success[_i1680]); } xfer += iprot->readListEnd(); } @@ -24701,10 +24952,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1654; - for (_iter1654 = this->success.begin(); _iter1654 != this->success.end(); ++_iter1654) + std::vector ::const_iterator _iter1681; + for (_iter1681 = this->success.begin(); _iter1681 != this->success.end(); ++_iter1681) { - xfer += oprot->writeString((*_iter1654)); + xfer += oprot->writeString((*_iter1681)); } xfer += oprot->writeListEnd(); } @@ -24749,14 +25000,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1655; - ::apache::thrift::protocol::TType _etype1658; - xfer += 
iprot->readListBegin(_etype1658, _size1655); - (*(this->success)).resize(_size1655); - uint32_t _i1659; - for (_i1659 = 0; _i1659 < _size1655; ++_i1659) + uint32_t _size1682; + ::apache::thrift::protocol::TType _etype1685; + xfer += iprot->readListBegin(_etype1685, _size1682); + (*(this->success)).resize(_size1682); + uint32_t _i1686; + for (_i1686 = 0; _i1686 < _size1682; ++_i1686) { - xfer += iprot->readString((*(this->success))[_i1659]); + xfer += iprot->readString((*(this->success))[_i1686]); } xfer += iprot->readListEnd(); } @@ -25694,11 +25945,11 @@ uint32_t ThriftHiveMetastore_get_not_null_constraints_presult::read(::apache::th } -ThriftHiveMetastore_update_table_column_statistics_args::~ThriftHiveMetastore_update_table_column_statistics_args() throw() { +ThriftHiveMetastore_get_default_constraints_args::~ThriftHiveMetastore_get_default_constraints_args() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_default_constraints_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -25721,8 +25972,8 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache: { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->stats_obj.read(iprot); - this->__isset.stats_obj = true; + xfer += this->request.read(iprot); + this->__isset.request = true; } else { xfer += iprot->skip(ftype); } @@ -25739,13 +25990,13 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache: return xfer; } -uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_default_constraints_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_default_constraints_args"); - xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->stats_obj.write(oprot); + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25754,17 +26005,17 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache } -ThriftHiveMetastore_update_table_column_statistics_pargs::~ThriftHiveMetastore_update_table_column_statistics_pargs() throw() { +ThriftHiveMetastore_get_default_constraints_pargs::~ThriftHiveMetastore_get_default_constraints_pargs() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_default_constraints_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_default_constraints_pargs"); - xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->stats_obj)).write(oprot); + xfer += 
oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -25773,11 +26024,11 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apach } -ThriftHiveMetastore_update_table_column_statistics_result::~ThriftHiveMetastore_update_table_column_statistics_result() throw() { +ThriftHiveMetastore_get_default_constraints_result::~ThriftHiveMetastore_get_default_constraints_result() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_default_constraints_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -25799,8 +26050,8 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apach switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25822,22 +26073,6 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apach xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -25850,15 +26085,15 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apach return xfer; } -uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_default_constraints_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_default_constraints_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -25868,14 +26103,6 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apac xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o4) { - xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->o4.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -25883,11 +26110,11 @@ uint32_t 
ThriftHiveMetastore_update_table_column_statistics_result::write(::apac } -ThriftHiveMetastore_update_table_column_statistics_presult::~ThriftHiveMetastore_update_table_column_statistics_presult() throw() { +ThriftHiveMetastore_get_default_constraints_presult::~ThriftHiveMetastore_get_default_constraints_presult() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_default_constraints_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -25909,8 +26136,8 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apac switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -25932,22 +26159,6 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apac xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -25961,11 +26172,11 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apac } -ThriftHiveMetastore_update_partition_column_statistics_args::~ThriftHiveMetastore_update_partition_column_statistics_args() throw() { +ThriftHiveMetastore_update_table_column_statistics_args::~ThriftHiveMetastore_update_table_column_statistics_args() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -26006,10 +26217,10 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apa return xfer; } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_args"); xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->stats_obj.write(oprot); @@ -26021,14 +26232,14 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::ap } -ThriftHiveMetastore_update_partition_column_statistics_pargs::~ThriftHiveMetastore_update_partition_column_statistics_pargs() throw() { +ThriftHiveMetastore_update_table_column_statistics_pargs::~ThriftHiveMetastore_update_table_column_statistics_pargs() throw() { } -uint32_t 
ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_pargs"); xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->stats_obj)).write(oprot); @@ -26040,11 +26251,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::a } -ThriftHiveMetastore_update_partition_column_statistics_result::~ThriftHiveMetastore_update_partition_column_statistics_result() throw() { +ThriftHiveMetastore_update_table_column_statistics_result::~ThriftHiveMetastore_update_table_column_statistics_result() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -26117,11 +26328,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::a return xfer; } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); @@ -26150,11 +26361,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(:: } -ThriftHiveMetastore_update_partition_column_statistics_presult::~ThriftHiveMetastore_update_partition_column_statistics_presult() throw() { +ThriftHiveMetastore_update_table_column_statistics_presult::~ThriftHiveMetastore_update_table_column_statistics_presult() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -26228,11 +26439,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(:: } -ThriftHiveMetastore_get_table_column_statistics_args::~ThriftHiveMetastore_get_table_column_statistics_args() throw() { +ThriftHiveMetastore_update_partition_column_statistics_args::~ThriftHiveMetastore_update_partition_column_statistics_args() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -26254,25 +26465,9 @@ uint32_t 
ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::th switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->col_name); - this->__isset.col_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->stats_obj.read(iprot); + this->__isset.stats_obj = true; } else { xfer += iprot->skip(ftype); } @@ -26289,21 +26484,13 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::th return xfer; } -uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_args"); - xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->col_name); + xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->stats_obj.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26312,25 +26499,17 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::t } -ThriftHiveMetastore_get_table_column_statistics_pargs::~ThriftHiveMetastore_get_table_column_statistics_pargs() throw() { +ThriftHiveMetastore_update_partition_column_statistics_pargs::~ThriftHiveMetastore_update_partition_column_statistics_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_pargs"); - xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->col_name))); + xfer += oprot->writeFieldBegin("stats_obj", 
::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->stats_obj)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -26339,11 +26518,11 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache:: } -ThriftHiveMetastore_get_table_column_statistics_result::~ThriftHiveMetastore_get_table_column_statistics_result() throw() { +ThriftHiveMetastore_update_partition_column_statistics_result::~ThriftHiveMetastore_update_partition_column_statistics_result() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -26365,8 +26544,8 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -26416,15 +26595,314 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache:: return xfer; } -uint32_t ThriftHiveMetastore_get_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_update_partition_column_statistics_presult::~ThriftHiveMetastore_update_partition_column_statistics_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += 
iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_table_column_statistics_args::~ThriftHiveMetastore_get_table_column_statistics_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->col_name); + this->__isset.col_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->col_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + 
+ThriftHiveMetastore_get_table_column_statistics_pargs::~ThriftHiveMetastore_get_table_column_statistics_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->col_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_table_column_statistics_result::~ThriftHiveMetastore_get_table_column_statistics_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -29237,14 +29715,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1660; - 
::apache::thrift::protocol::TType _etype1663; - xfer += iprot->readListBegin(_etype1663, _size1660); - this->success.resize(_size1660); - uint32_t _i1664; - for (_i1664 = 0; _i1664 < _size1660; ++_i1664) + uint32_t _size1687; + ::apache::thrift::protocol::TType _etype1690; + xfer += iprot->readListBegin(_etype1690, _size1687); + this->success.resize(_size1687); + uint32_t _i1691; + for (_i1691 = 0; _i1691 < _size1687; ++_i1691) { - xfer += iprot->readString(this->success[_i1664]); + xfer += iprot->readString(this->success[_i1691]); } xfer += iprot->readListEnd(); } @@ -29283,10 +29761,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1665; - for (_iter1665 = this->success.begin(); _iter1665 != this->success.end(); ++_iter1665) + std::vector ::const_iterator _iter1692; + for (_iter1692 = this->success.begin(); _iter1692 != this->success.end(); ++_iter1692) { - xfer += oprot->writeString((*_iter1665)); + xfer += oprot->writeString((*_iter1692)); } xfer += oprot->writeListEnd(); } @@ -29331,14 +29809,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1666; - ::apache::thrift::protocol::TType _etype1669; - xfer += iprot->readListBegin(_etype1669, _size1666); - (*(this->success)).resize(_size1666); - uint32_t _i1670; - for (_i1670 = 0; _i1670 < _size1666; ++_i1670) + uint32_t _size1693; + ::apache::thrift::protocol::TType _etype1696; + xfer += iprot->readListBegin(_etype1696, _size1693); + (*(this->success)).resize(_size1693); + uint32_t _i1697; + for (_i1697 = 0; _i1697 < _size1693; ++_i1697) { - xfer += iprot->readString((*(this->success))[_i1670]); + xfer += iprot->readString((*(this->success))[_i1697]); } xfer += iprot->readListEnd(); } @@ -30298,14 +30776,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1671; - ::apache::thrift::protocol::TType _etype1674; - xfer += iprot->readListBegin(_etype1674, _size1671); - this->success.resize(_size1671); - uint32_t _i1675; - for (_i1675 = 0; _i1675 < _size1671; ++_i1675) + uint32_t _size1698; + ::apache::thrift::protocol::TType _etype1701; + xfer += iprot->readListBegin(_etype1701, _size1698); + this->success.resize(_size1698); + uint32_t _i1702; + for (_i1702 = 0; _i1702 < _size1698; ++_i1702) { - xfer += iprot->readString(this->success[_i1675]); + xfer += iprot->readString(this->success[_i1702]); } xfer += iprot->readListEnd(); } @@ -30344,10 +30822,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1676; - for (_iter1676 = this->success.begin(); _iter1676 != this->success.end(); ++_iter1676) + std::vector ::const_iterator _iter1703; + for (_iter1703 = this->success.begin(); _iter1703 != this->success.end(); ++_iter1703) { - xfer += oprot->writeString((*_iter1676)); + xfer += oprot->writeString((*_iter1703)); } xfer += oprot->writeListEnd(); } @@ -30392,14 +30870,14 @@ uint32_t 
ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1677; - ::apache::thrift::protocol::TType _etype1680; - xfer += iprot->readListBegin(_etype1680, _size1677); - (*(this->success)).resize(_size1677); - uint32_t _i1681; - for (_i1681 = 0; _i1681 < _size1677; ++_i1681) + uint32_t _size1704; + ::apache::thrift::protocol::TType _etype1707; + xfer += iprot->readListBegin(_etype1707, _size1704); + (*(this->success)).resize(_size1704); + uint32_t _i1708; + for (_i1708 = 0; _i1708 < _size1704; ++_i1708) { - xfer += iprot->readString((*(this->success))[_i1681]); + xfer += iprot->readString((*(this->success))[_i1708]); } xfer += iprot->readListEnd(); } @@ -30472,9 +30950,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1682; - xfer += iprot->readI32(ecast1682); - this->principal_type = (PrincipalType::type)ecast1682; + int32_t ecast1709; + xfer += iprot->readI32(ecast1709); + this->principal_type = (PrincipalType::type)ecast1709; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30490,9 +30968,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1683; - xfer += iprot->readI32(ecast1683); - this->grantorType = (PrincipalType::type)ecast1683; + int32_t ecast1710; + xfer += iprot->readI32(ecast1710); + this->grantorType = (PrincipalType::type)ecast1710; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -30763,9 +31241,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1684; - xfer += iprot->readI32(ecast1684); - this->principal_type = (PrincipalType::type)ecast1684; + int32_t ecast1711; + xfer += iprot->readI32(ecast1711); + this->principal_type = (PrincipalType::type)ecast1711; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30996,9 +31474,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1685; - xfer += iprot->readI32(ecast1685); - this->principal_type = (PrincipalType::type)ecast1685; + int32_t ecast1712; + xfer += iprot->readI32(ecast1712); + this->principal_type = (PrincipalType::type)ecast1712; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31087,14 +31565,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1686; - ::apache::thrift::protocol::TType _etype1689; - xfer += iprot->readListBegin(_etype1689, _size1686); - this->success.resize(_size1686); - uint32_t _i1690; - for (_i1690 = 0; _i1690 < _size1686; ++_i1690) + uint32_t _size1713; + ::apache::thrift::protocol::TType _etype1716; + xfer += iprot->readListBegin(_etype1716, _size1713); + this->success.resize(_size1713); + uint32_t _i1717; + for (_i1717 = 0; _i1717 < _size1713; ++_i1717) { - xfer += this->success[_i1690].read(iprot); + xfer += this->success[_i1717].read(iprot); } xfer += iprot->readListEnd(); } @@ -31133,10 +31611,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1691; - for (_iter1691 = this->success.begin(); _iter1691 != this->success.end(); ++_iter1691) + std::vector ::const_iterator _iter1718; + for (_iter1718 = this->success.begin(); _iter1718 != this->success.end(); ++_iter1718) { - xfer += (*_iter1691).write(oprot); + xfer += (*_iter1718).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31181,14 +31659,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1692; - ::apache::thrift::protocol::TType _etype1695; - xfer += iprot->readListBegin(_etype1695, _size1692); - (*(this->success)).resize(_size1692); - uint32_t _i1696; - for (_i1696 = 0; _i1696 < _size1692; ++_i1696) + uint32_t _size1719; + ::apache::thrift::protocol::TType _etype1722; + xfer += iprot->readListBegin(_etype1722, _size1719); + (*(this->success)).resize(_size1719); + uint32_t _i1723; + for (_i1723 = 0; _i1723 < _size1719; ++_i1723) { - xfer += (*(this->success))[_i1696].read(iprot); + xfer += (*(this->success))[_i1723].read(iprot); } xfer += iprot->readListEnd(); } @@ -31884,14 +32362,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1697; - ::apache::thrift::protocol::TType _etype1700; - xfer += iprot->readListBegin(_etype1700, _size1697); - this->group_names.resize(_size1697); - uint32_t _i1701; - for (_i1701 = 0; _i1701 < _size1697; ++_i1701) + uint32_t _size1724; + ::apache::thrift::protocol::TType _etype1727; + xfer += iprot->readListBegin(_etype1727, _size1724); + this->group_names.resize(_size1724); + uint32_t _i1728; + for (_i1728 = 0; _i1728 < _size1724; ++_i1728) { - xfer += iprot->readString(this->group_names[_i1701]); + xfer += iprot->readString(this->group_names[_i1728]); } xfer += iprot->readListEnd(); } @@ -31928,10 +32406,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1702; - for (_iter1702 = this->group_names.begin(); _iter1702 != this->group_names.end(); ++_iter1702) + std::vector ::const_iterator _iter1729; + for (_iter1729 = this->group_names.begin(); _iter1729 != this->group_names.end(); ++_iter1729) { - xfer += oprot->writeString((*_iter1702)); + xfer += oprot->writeString((*_iter1729)); } xfer += oprot->writeListEnd(); } @@ -31963,10 +32441,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1703; - for (_iter1703 = (*(this->group_names)).begin(); _iter1703 != (*(this->group_names)).end(); ++_iter1703) + std::vector ::const_iterator _iter1730; + for (_iter1730 = (*(this->group_names)).begin(); _iter1730 != (*(this->group_names)).end(); ++_iter1730) { - xfer += oprot->writeString((*_iter1703)); + xfer += oprot->writeString((*_iter1730)); } xfer += 
oprot->writeListEnd(); } @@ -32141,9 +32619,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1704; - xfer += iprot->readI32(ecast1704); - this->principal_type = (PrincipalType::type)ecast1704; + int32_t ecast1731; + xfer += iprot->readI32(ecast1731); + this->principal_type = (PrincipalType::type)ecast1731; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -32248,14 +32726,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1705; - ::apache::thrift::protocol::TType _etype1708; - xfer += iprot->readListBegin(_etype1708, _size1705); - this->success.resize(_size1705); - uint32_t _i1709; - for (_i1709 = 0; _i1709 < _size1705; ++_i1709) + uint32_t _size1732; + ::apache::thrift::protocol::TType _etype1735; + xfer += iprot->readListBegin(_etype1735, _size1732); + this->success.resize(_size1732); + uint32_t _i1736; + for (_i1736 = 0; _i1736 < _size1732; ++_i1736) { - xfer += this->success[_i1709].read(iprot); + xfer += this->success[_i1736].read(iprot); } xfer += iprot->readListEnd(); } @@ -32294,10 +32772,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1710; - for (_iter1710 = this->success.begin(); _iter1710 != this->success.end(); ++_iter1710) + std::vector ::const_iterator _iter1737; + for (_iter1737 = this->success.begin(); _iter1737 != this->success.end(); ++_iter1737) { - xfer += (*_iter1710).write(oprot); + xfer += (*_iter1737).write(oprot); } xfer += oprot->writeListEnd(); } @@ -32342,14 +32820,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1711; - ::apache::thrift::protocol::TType _etype1714; - xfer += iprot->readListBegin(_etype1714, _size1711); - (*(this->success)).resize(_size1711); - uint32_t _i1715; - for (_i1715 = 0; _i1715 < _size1711; ++_i1715) + uint32_t _size1738; + ::apache::thrift::protocol::TType _etype1741; + xfer += iprot->readListBegin(_etype1741, _size1738); + (*(this->success)).resize(_size1738); + uint32_t _i1742; + for (_i1742 = 0; _i1742 < _size1738; ++_i1742) { - xfer += (*(this->success))[_i1715].read(iprot); + xfer += (*(this->success))[_i1742].read(iprot); } xfer += iprot->readListEnd(); } @@ -33037,14 +33515,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1716; - ::apache::thrift::protocol::TType _etype1719; - xfer += iprot->readListBegin(_etype1719, _size1716); - this->group_names.resize(_size1716); - uint32_t _i1720; - for (_i1720 = 0; _i1720 < _size1716; ++_i1720) + uint32_t _size1743; + ::apache::thrift::protocol::TType _etype1746; + xfer += iprot->readListBegin(_etype1746, _size1743); + this->group_names.resize(_size1743); + uint32_t _i1747; + for (_i1747 = 0; _i1747 < _size1743; ++_i1747) { - xfer += iprot->readString(this->group_names[_i1720]); + xfer += iprot->readString(this->group_names[_i1747]); } xfer += iprot->readListEnd(); } @@ -33077,10 +33555,10 @@ 
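
Every hunk in this stretch renumbers one and the same generated shape: readListBegin / resize / indexed fill on the way in, writeListBegin / const_iterator walk on the way out. The angle-bracketed template arguments did not survive in the text above: `std::vector ::const_iterator` is really std::vector<std::string>::const_iterator, and `static_cast(...)` is static_cast<uint32_t>(...). Below is a self-contained sketch of the pattern with those arguments restored; FakeProtocol is a stand-in for ::apache::thrift::protocol::TProtocol (whose real readListBegin also yields the element TType), so the sketch compiles and runs on its own.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct FakeProtocol {
  std::vector<std::string> wire;  // pretend on-the-wire payload
  std::size_t pos = 0;

  uint32_t readListBegin(uint32_t& size) {
    size = static_cast<uint32_t>(wire.size());
    return 4;  // pretend "bytes consumed", as the real calls report
  }
  uint32_t readString(std::string& s) { s = wire[pos++]; return static_cast<uint32_t>(s.size()); }
  uint32_t readListEnd() { return 0; }

  std::vector<std::string> out;   // captures what the write side emits
  uint32_t writeListBegin(uint32_t /*size*/) { return 4; }
  uint32_t writeString(const std::string& s) { out.push_back(s); return static_cast<uint32_t>(s.size()); }
  uint32_t writeListEnd() { return 0; }
};

// Read side: clear, resize to the announced size, then fill each slot in place.
uint32_t readStringList(FakeProtocol& iprot, std::vector<std::string>& success) {
  uint32_t xfer = 0;
  success.clear();
  uint32_t _size = 0;
  xfer += iprot.readListBegin(_size);
  success.resize(_size);
  for (uint32_t _i = 0; _i < _size; ++_i) {
    xfer += iprot.readString(success[_i]);
  }
  xfer += iprot.readListEnd();
  return xfer;
}

// Write side: note the restored template arguments the diff text lost.
uint32_t writeStringList(FakeProtocol& oprot, const std::vector<std::string>& success) {
  uint32_t xfer = 0;
  xfer += oprot.writeListBegin(static_cast<uint32_t>(success.size()));
  std::vector<std::string>::const_iterator _iter;
  for (_iter = success.begin(); _iter != success.end(); ++_iter) {
    xfer += oprot.writeString(*_iter);
  }
  xfer += oprot.writeListEnd();
  return xfer;
}

int main() {
  FakeProtocol p;
  p.wire = {"role_a", "role_b"};
  std::vector<std::string> names;
  readStringList(p, names);
  writeStringList(p, names);
  std::cout << p.out.size() << " strings round-tripped\n";  // prints: 2 strings round-tripped
}
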
uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1721; - for (_iter1721 = this->group_names.begin(); _iter1721 != this->group_names.end(); ++_iter1721) + std::vector ::const_iterator _iter1748; + for (_iter1748 = this->group_names.begin(); _iter1748 != this->group_names.end(); ++_iter1748) { - xfer += oprot->writeString((*_iter1721)); + xfer += oprot->writeString((*_iter1748)); } xfer += oprot->writeListEnd(); } @@ -33108,10 +33586,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1722; - for (_iter1722 = (*(this->group_names)).begin(); _iter1722 != (*(this->group_names)).end(); ++_iter1722) + std::vector ::const_iterator _iter1749; + for (_iter1749 = (*(this->group_names)).begin(); _iter1749 != (*(this->group_names)).end(); ++_iter1749) { - xfer += oprot->writeString((*_iter1722)); + xfer += oprot->writeString((*_iter1749)); } xfer += oprot->writeListEnd(); } @@ -33152,14 +33630,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1723; - ::apache::thrift::protocol::TType _etype1726; - xfer += iprot->readListBegin(_etype1726, _size1723); - this->success.resize(_size1723); - uint32_t _i1727; - for (_i1727 = 0; _i1727 < _size1723; ++_i1727) + uint32_t _size1750; + ::apache::thrift::protocol::TType _etype1753; + xfer += iprot->readListBegin(_etype1753, _size1750); + this->success.resize(_size1750); + uint32_t _i1754; + for (_i1754 = 0; _i1754 < _size1750; ++_i1754) { - xfer += iprot->readString(this->success[_i1727]); + xfer += iprot->readString(this->success[_i1754]); } xfer += iprot->readListEnd(); } @@ -33198,10 +33676,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1728; - for (_iter1728 = this->success.begin(); _iter1728 != this->success.end(); ++_iter1728) + std::vector ::const_iterator _iter1755; + for (_iter1755 = this->success.begin(); _iter1755 != this->success.end(); ++_iter1755) { - xfer += oprot->writeString((*_iter1728)); + xfer += oprot->writeString((*_iter1755)); } xfer += oprot->writeListEnd(); } @@ -33246,14 +33724,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1729; - ::apache::thrift::protocol::TType _etype1732; - xfer += iprot->readListBegin(_etype1732, _size1729); - (*(this->success)).resize(_size1729); - uint32_t _i1733; - for (_i1733 = 0; _i1733 < _size1729; ++_i1733) + uint32_t _size1756; + ::apache::thrift::protocol::TType _etype1759; + xfer += iprot->readListBegin(_etype1759, _size1756); + (*(this->success)).resize(_size1756); + uint32_t _i1760; + for (_i1760 = 0; _i1760 < _size1756; ++_i1760) { - xfer += 
iprot->readString((*(this->success))[_i1733]); + xfer += iprot->readString((*(this->success))[_i1760]); } xfer += iprot->readListEnd(); } @@ -34564,14 +35042,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1734; - ::apache::thrift::protocol::TType _etype1737; - xfer += iprot->readListBegin(_etype1737, _size1734); - this->success.resize(_size1734); - uint32_t _i1738; - for (_i1738 = 0; _i1738 < _size1734; ++_i1738) + uint32_t _size1761; + ::apache::thrift::protocol::TType _etype1764; + xfer += iprot->readListBegin(_etype1764, _size1761); + this->success.resize(_size1761); + uint32_t _i1765; + for (_i1765 = 0; _i1765 < _size1761; ++_i1765) { - xfer += iprot->readString(this->success[_i1738]); + xfer += iprot->readString(this->success[_i1765]); } xfer += iprot->readListEnd(); } @@ -34602,10 +35080,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1739; - for (_iter1739 = this->success.begin(); _iter1739 != this->success.end(); ++_iter1739) + std::vector ::const_iterator _iter1766; + for (_iter1766 = this->success.begin(); _iter1766 != this->success.end(); ++_iter1766) { - xfer += oprot->writeString((*_iter1739)); + xfer += oprot->writeString((*_iter1766)); } xfer += oprot->writeListEnd(); } @@ -34646,14 +35124,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1740; - ::apache::thrift::protocol::TType _etype1743; - xfer += iprot->readListBegin(_etype1743, _size1740); - (*(this->success)).resize(_size1740); - uint32_t _i1744; - for (_i1744 = 0; _i1744 < _size1740; ++_i1744) + uint32_t _size1767; + ::apache::thrift::protocol::TType _etype1770; + xfer += iprot->readListBegin(_etype1770, _size1767); + (*(this->success)).resize(_size1767); + uint32_t _i1771; + for (_i1771 = 0; _i1771 < _size1767; ++_i1771) { - xfer += iprot->readString((*(this->success))[_i1744]); + xfer += iprot->readString((*(this->success))[_i1771]); } xfer += iprot->readListEnd(); } @@ -35379,14 +35857,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1745; - ::apache::thrift::protocol::TType _etype1748; - xfer += iprot->readListBegin(_etype1748, _size1745); - this->success.resize(_size1745); - uint32_t _i1749; - for (_i1749 = 0; _i1749 < _size1745; ++_i1749) + uint32_t _size1772; + ::apache::thrift::protocol::TType _etype1775; + xfer += iprot->readListBegin(_etype1775, _size1772); + this->success.resize(_size1772); + uint32_t _i1776; + for (_i1776 = 0; _i1776 < _size1772; ++_i1776) { - xfer += iprot->readString(this->success[_i1749]); + xfer += iprot->readString(this->success[_i1776]); } xfer += iprot->readListEnd(); } @@ -35417,10 +35895,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1750; - for (_iter1750 = 
this->success.begin(); _iter1750 != this->success.end(); ++_iter1750) + std::vector ::const_iterator _iter1777; + for (_iter1777 = this->success.begin(); _iter1777 != this->success.end(); ++_iter1777) { - xfer += oprot->writeString((*_iter1750)); + xfer += oprot->writeString((*_iter1777)); } xfer += oprot->writeListEnd(); } @@ -35461,14 +35939,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1751; - ::apache::thrift::protocol::TType _etype1754; - xfer += iprot->readListBegin(_etype1754, _size1751); - (*(this->success)).resize(_size1751); - uint32_t _i1755; - for (_i1755 = 0; _i1755 < _size1751; ++_i1755) + uint32_t _size1778; + ::apache::thrift::protocol::TType _etype1781; + xfer += iprot->readListBegin(_etype1781, _size1778); + (*(this->success)).resize(_size1778); + uint32_t _i1782; + for (_i1782 = 0; _i1782 < _size1778; ++_i1782) { - xfer += iprot->readString((*(this->success))[_i1755]); + xfer += iprot->readString((*(this->success))[_i1782]); } xfer += iprot->readListEnd(); } @@ -46581,13 +47059,13 @@ void ThriftHiveMetastoreClient::recv_create_table_with_environment_context() return; } -void ThriftHiveMetastoreClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) +void ThriftHiveMetastoreClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints, const std::vector & defaultConstraints) { - send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints); recv_create_table_with_constraints(); } -void ThriftHiveMetastoreClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints) +void ThriftHiveMetastoreClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints, const std::vector & defaultConstraints) { int32_t cseqid = 0; oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid); @@ -46598,6 +47076,7 @@ void ThriftHiveMetastoreClient::send_create_table_with_constraints(const Table& args.foreignKeys = &foreignKeys; args.uniqueConstraints = &uniqueConstraints; args.notNullConstraints = ¬NullConstraints; + args.defaultConstraints = &defaultConstraints; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46945,6 +47424,65 @@ void ThriftHiveMetastoreClient::recv_add_not_null_constraint() return; } +void ThriftHiveMetastoreClient::add_default_constraint(const AddDefaultConstraintRequest& req) +{ + send_add_default_constraint(req); + recv_add_default_constraint(); +} + +void ThriftHiveMetastoreClient::send_add_default_constraint(const AddDefaultConstraintRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("add_default_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_add_default_constraint_pargs args; + args.req = &req; + 
args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_add_default_constraint() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("add_default_constraint") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_add_default_constraint_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + return; +} + void ThriftHiveMetastoreClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { send_drop_table(dbname, name, deleteData); @@ -51591,6 +52129,70 @@ void ThriftHiveMetastoreClient::recv_get_not_null_constraints(NotNullConstraints throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_not_null_constraints failed: unknown result"); } +void ThriftHiveMetastoreClient::get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request) +{ + send_get_default_constraints(request); + recv_get_default_constraints(_return); +} + +void ThriftHiveMetastoreClient::send_get_default_constraints(const DefaultConstraintsRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_default_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_default_constraints_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_default_constraints(DefaultConstraintsResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_default_constraints") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_default_constraints_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, 
"get_default_constraints failed: unknown result"); +} + bool ThriftHiveMetastoreClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { send_update_table_column_statistics(stats_obj); @@ -58252,7 +58854,7 @@ void ThriftHiveMetastoreProcessor::process_create_table_with_constraints(int32_t ThriftHiveMetastore_create_table_with_constraints_result result; try { - iface_->create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints); + iface_->create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints); } catch (AlreadyExistsException &o1) { result.o1 = o1; result.__isset.o1 = true; @@ -58589,6 +59191,65 @@ void ThriftHiveMetastoreProcessor::process_add_not_null_constraint(int32_t seqid } } +void ThriftHiveMetastoreProcessor::process_add_default_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_default_constraint", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_default_constraint"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_default_constraint"); + } + + ThriftHiveMetastore_add_default_constraint_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_default_constraint", bytes); + } + + ThriftHiveMetastore_add_default_constraint_result result; + try { + iface_->add_default_constraint(args.req); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_default_constraint"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("add_default_constraint", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_default_constraint"); + } + + oprot->writeMessageBegin("add_default_constraint", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_default_constraint", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -62874,6 +63535,66 @@ void ThriftHiveMetastoreProcessor::process_get_not_null_constraints(int32_t seqi } } +void ThriftHiveMetastoreProcessor::process_get_default_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* 
ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_default_constraints", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_default_constraints"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_default_constraints"); + } + + ThriftHiveMetastore_get_default_constraints_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_default_constraints", bytes); + } + + ThriftHiveMetastore_get_default_constraints_result result; + try { + iface_->get_default_constraints(result.success, args.request); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_default_constraints"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_default_constraints", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_default_constraints"); + } + + oprot->writeMessageBegin("get_default_constraints", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_default_constraints", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_update_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -68405,7 +69126,191 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_database") != 0) { + if (fname.compare("get_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_database_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + 
this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t seqid = send_drop_database(name, deleteData, cascade); + recv_drop_database(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_database_pargs args; + args.name = &name; + args.deleteData = &deleteData; + args.cascade = &cascade; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_drop_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +{ + int32_t seqid = send_get_databases(pattern); + recv_get_databases(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void 
ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_databases") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68414,7 +69319,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_database_presult result; + ThriftHiveMetastore_get_databases_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68429,104 +69334,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) -{ - int32_t seqid = send_drop_database(name, deleteData, cascade); - recv_drop_database(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_drop_database_pargs args; - args.name = &name; - args.deleteData = &deleteData; - args.cascade = &cascade; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == 
::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("drop_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_drop_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - sentry.commit(); - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68536,20 +69345,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::get_all_databases(std::vector & _return) { - int32_t seqid = send_get_databases(pattern); - recv_get_databases(_return, seqid); + int32_t seqid = send_get_all_databases(); + recv_get_all_databases(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_databases_pargs args; - args.pattern = &pattern; + ThriftHiveMetastore_get_all_databases_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68560,7 +69368,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68589,7 +69397,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_databases") != 0) { + if (fname.compare("get_all_databases") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68598,7 +69406,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorreadMessageEnd(); @@ -68614,7 +69422,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -68624,19 +69432,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return) +void 
ThriftHiveMetastoreConcurrentClient::alter_database(const std::string& dbname, const Database& db) { - int32_t seqid = send_get_all_databases(); - recv_get_all_databases(_return, seqid); + int32_t seqid = send_alter_database(dbname, db); + recv_alter_database(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::string& dbname, const Database& db) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_databases_pargs args; + ThriftHiveMetastore_alter_database_pargs args; + args.dbname = &dbname; + args.db = &db; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68647,7 +69457,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid) { int32_t rseqid = 0; @@ -68676,7 +69486,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_databases") != 0) { + if (fname.compare("alter_database") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68685,23 +69495,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68711,21 +69519,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_database_pargs args; - args.dbname = &dbname; - args.db = &db; + ThriftHiveMetastore_get_type_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68736,7 +69543,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68765,7 +69572,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_database") != 0) { + if (fname.compare("get_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); 
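
All the ConcurrentClient methods renamed here share one duplex discipline: send_X stamps a fresh sequence id under a TConcurrentSendSentry, and recv_X spins in while(true), either reading the next frame itself or picking up one a peer already parked, matching on seqid == rseqid and handing mismatches to updatePending()/waitForWork(). Here is a deliberately simplified standalone model of that demultiplexing, using a plain map plus condition variable rather than the Thrift sync object; it illustrates the idea only.

#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <string>
#include <thread>

// Toy demultiplexer: frames may arrive in any order; each waiter blocks until
// the frame carrying its own sequence id shows up. The generated client gets
// the same effect from TConcurrentClientSyncInfo, with the extra twist that
// whichever thread holds the read lock performs the actual wire I/O.
struct PendingDemux {
  std::mutex m;
  std::condition_variable cv;
  std::map<int32_t, std::string> pending;  // rseqid -> payload

  void deposit(int32_t rseqid, std::string payload) {
    std::lock_guard<std::mutex> g(m);
    pending.emplace(rseqid, std::move(payload));
    cv.notify_all();
  }

  std::string claim(int32_t seqid) {
    std::unique_lock<std::mutex> g(m);
    cv.wait(g, [&] { return pending.count(seqid) != 0; });
    std::string out = std::move(pending.at(seqid));
    pending.erase(seqid);
    return out;
  }
};

int main() {
  PendingDemux demux;
  std::thread a([&] { std::cout << "seq 1 got: " << demux.claim(1) << "\n"; });
  std::thread b([&] { std::cout << "seq 2 got: " << demux.claim(2) << "\n"; });
  demux.deposit(2, "reply-for-2");  // out-of-order arrival is fine
  demux.deposit(1, "reply-for-1");
  a.join();
  b.join();
}
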
iprot_->getTransport()->readEnd(); @@ -68774,11 +69581,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_database_presult result; + ThriftHiveMetastore_get_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -68787,8 +69600,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68798,20 +69611,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name) +bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type) { - int32_t seqid = send_get_type(name); - recv_get_type(_return, seqid); + int32_t seqid = send_create_type(type); + return recv_create_type(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_type_pargs args; - args.name = &name; + ThriftHiveMetastore_create_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68822,7 +69635,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& na return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) { int32_t rseqid = 0; @@ -68851,7 +69664,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_type") != 0) { + if (fname.compare("create_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68860,16 +69673,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_type_presult result; + bool _return; + ThriftHiveMetastore_create_type_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -68879,8 +69692,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int sentry.commit(); throw 
result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68890,19 +69707,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type) +bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type) { - int32_t seqid = send_create_type(type); - return recv_create_type(seqid); + int32_t seqid = send_drop_type(type); + return recv_drop_type(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_type_pargs args; + ThriftHiveMetastore_drop_type_pargs args; args.type = &type; args.write(oprot_); @@ -68914,7 +69731,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) { int32_t rseqid = 0; @@ -68943,7 +69760,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_type") != 0) { + if (fname.compare("drop_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68953,7 +69770,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_create_type_presult result; + ThriftHiveMetastore_drop_type_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68971,12 +69788,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68986,20 +69799,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type) +void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map & _return, const std::string& name) { - int32_t seqid = send_drop_type(type); - return recv_drop_type(seqid); + int32_t seqid = send_get_type_all(name); + 
 recv_get_type_all(_return, seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_drop_type_pargs args;
-  args.type = &type;
+  ThriftHiveMetastore_get_type_all_pargs args;
+  args.name = &name;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69010,7 +69823,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type)
   return cseqid;
 }
 
-bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69039,7 +69852,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("drop_type") != 0) {
+      if (fname.compare("get_type_all") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -69048,27 +69861,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      bool _return;
-      ThriftHiveMetastore_drop_type_presult result;
+      ThriftHiveMetastore_get_type_all_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
       if (result.__isset.success) {
+        // _return pointer has now been filled
         sentry.commit();
-        return _return;
-      }
-      if (result.__isset.o1) {
-        sentry.commit();
-        throw result.o1;
+        return;
       }
       if (result.__isset.o2) {
         sentry.commit();
         throw result.o2;
       }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -69078,20 +69887,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map<std::string, Type> & _return, const std::string& name)
+void ThriftHiveMetastoreConcurrentClient::get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
 {
-  int32_t seqid = send_get_type_all(name);
-  recv_get_type_all(_return, seqid);
+  int32_t seqid = send_get_fields(db_name, table_name);
+  recv_get_fields(_return, seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_get_type_all_pargs args;
-  args.name = &name;
+  ThriftHiveMetastore_get_fields_pargs args;
+  args.db_name = &db_name;
+  args.table_name = &table_name;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69102,7 +69912,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69131,7 +69941,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_type_all") != 0) {
+      if (fname.compare("get_fields") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -69140,7 +69950,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_type_all_presult result;
+      ThriftHiveMetastore_get_fields_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -69151,12 +69961,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
         sentry.commit();
         return;
       }
+      if (result.__isset.o1) {
+        sentry.commit();
+        throw result.o1;
+      }
       if (result.__isset.o2) {
         sentry.commit();
         throw result.o2;
       }
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -69166,21 +69984,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map<std::string, Type> & _return, const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
+void ThriftHiveMetastoreConcurrentClient::get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_get_fields(db_name, table_name);
-  recv_get_fields(_return, seqid);
+  int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context);
+  recv_get_fields_with_environment_context(_return, seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_get_fields_pargs args;
+  ThriftHiveMetastore_get_fields_with_environment_context_pargs args;
   args.db_name = &db_name;
   args.table_name = &table_name;
+  args.environment_context = &environment_context;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69191,7 +70010,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69220,7 +70039,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_fields") != 0) {
+      if (fname.compare("get_fields_with_environment_context") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -69229,7 +70048,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_fields_presult result;
+      ThriftHiveMetastore_get_fields_with_environment_context_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -69253,7 +70072,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
         throw result.o3;
       }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -69263,22 +70082,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector<FieldSchema> & _return, const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
 {
-  int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context);
-  recv_get_fields_with_environment_context(_return, seqid);
+  int32_t seqid = send_get_schema(db_name, table_name);
+  recv_get_schema(_return, seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_get_fields_with_environment_context_pargs args;
+  ThriftHiveMetastore_get_schema_pargs args;
   args.db_name = &db_name;
   args.table_name = &table_name;
-  args.environment_context = &environment_context;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69289,7 +70107,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69318,7 +70136,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_fields_with_environment_context") != 0) {
+      if (fname.compare("get_schema") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -69327,7 +70145,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_fields_with_environment_context_presult result;
+      ThriftHiveMetastore_get_schema_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -69351,7 +70169,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
         throw result.o3;
       }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -69361,21 +70179,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name)
+void ThriftHiveMetastoreConcurrentClient::get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
-  int32_t seqid = send_get_schema(db_name, table_name);
-  recv_get_schema(_return, seqid);
+  int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context);
+  recv_get_schema_with_environment_context(_return, seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name)
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_get_schema_pargs args;
+  ThriftHiveMetastore_get_schema_with_environment_context_pargs args;
   args.db_name = &db_name;
   args.table_name = &table_name;
+  args.environment_context = &environment_context;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69386,7 +70205,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69415,7 +70234,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_schema") != 0) {
+      if (fname.compare("get_schema_with_environment_context") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -69424,7 +70243,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_schema_presult result;
+      ThriftHiveMetastore_get_schema_with_environment_context_presult result;
       result.success = &_return;
       result.read(iprot_);
       iprot_->readMessageEnd();
@@ -69448,7 +70267,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
        throw result.o3;
       }
       // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result");
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result");
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -69458,22 +70277,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector<FieldSchema> & _return, const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl)
 {
-  int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context);
-  recv_get_schema_with_environment_context(_return, seqid);
+  int32_t seqid = send_create_table(tbl);
+  recv_create_table(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_get_schema_with_environment_context_pargs args;
-  args.db_name = &db_name;
-  args.table_name = &table_name;
-  args.environment_context = &environment_context;
+  ThriftHiveMetastore_create_table_pargs args;
+  args.tbl = &tbl;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69484,7 +70301,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69513,7 +70330,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
       }
-      if (fname.compare("get_schema_with_environment_context") != 0) {
+      if (fname.compare("create_table") != 0) {
         iprot_->skip(::apache::thrift::protocol::T_STRUCT);
         iprot_->readMessageEnd();
         iprot_->getTransport()->readEnd();
@@ -69522,17 +70339,106 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
         using ::apache::thrift::protocol::TProtocolException;
         throw TProtocolException(TProtocolException::INVALID_DATA);
       }
-      ThriftHiveMetastore_get_schema_with_environment_context_presult result;
-      result.success = &_return;
+      ThriftHiveMetastore_create_table_presult result;
       result.read(iprot_);
       iprot_->readMessageEnd();
       iprot_->getTransport()->readEnd();
-      if (result.__isset.success) {
-        // _return pointer has now been filled
+      if (result.__isset.o1) {
         sentry.commit();
-        return;
+        throw result.o1;
       }
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
+      if (result.__isset.o3) {
+        sentry.commit();
+        throw result.o3;
+      }
+      if (result.__isset.o4) {
+        sentry.commit();
+        throw result.o4;
+      }
+      sentry.commit();
+      return;
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
+void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
+{
+  int32_t seqid = send_create_table_with_environment_context(tbl, environment_context);
+  recv_create_table_with_environment_context(seqid);
+}
+
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_create_table_with_environment_context_pargs args;
+  args.tbl = &tbl;
+  args.environment_context = &environment_context;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("create_table_with_environment_context") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      ThriftHiveMetastore_create_table_with_environment_context_presult result;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
       if (result.__isset.o1) {
         sentry.commit();
         throw result.o1;
@@ -69545,8 +70451,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
         sentry.commit();
         throw result.o3;
       }
-      // in a bad state, don't commit
-      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result");
+      if (result.__isset.o4) {
+        sentry.commit();
+        throw result.o4;
+      }
+      sentry.commit();
+      return;
     }
     // seqid != rseqid
     this->sync_.updatePending(fname, mtype, rseqid);
@@ -69556,20 +70466,25 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return, const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl)
+void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints)
 {
-  int32_t seqid = send_create_table(tbl);
-  recv_create_table(seqid);
+  int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints);
+  recv_create_table_with_constraints(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl)
+int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_create_table_pargs args;
+  ThriftHiveMetastore_create_table_with_constraints_pargs args;
   args.tbl = &tbl;
+  args.primaryKeys = &primaryKeys;
+  args.foreignKeys = &foreignKeys;
+  args.uniqueConstraints = &uniqueConstraints;
+  args.notNullConstraints = &notNullConstraints;
+  args.defaultConstraints = &defaultConstraints;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69580,7 +70495,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69609,7 +70524,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("create_table") != 0) {
+      if (fname.compare("create_table_with_constraints") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -69618,7 +70533,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_create_table_presult result;
+      ThriftHiveMetastore_create_table_with_constraints_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -69650,21 +70565,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
+void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req)
 {
-  int32_t seqid = send_create_table_with_environment_context(tbl, environment_context);
-  recv_create_table_with_environment_context(seqid);
+  int32_t seqid = send_drop_constraint(req);
+  recv_drop_constraint(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
+int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_create_table_with_environment_context_pargs args;
-  args.tbl = &tbl;
-  args.environment_context = &environment_context;
+  ThriftHiveMetastore_drop_constraint_pargs args;
+  args.req = &req;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69675,7 +70589,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69704,7 +70618,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("create_table_with_environment_context") != 0) {
+      if (fname.compare("drop_constraint") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -69713,7 +70627,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_create_table_with_environment_context_presult result;
+      ThriftHiveMetastore_drop_constraint_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -69722,18 +70636,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
        sentry.commit();
        throw result.o1;
      }
-      if (result.__isset.o2) {
-        sentry.commit();
-        throw result.o2;
-      }
      if (result.__isset.o3) {
        sentry.commit();
        throw result.o3;
      }
-      if (result.__isset.o4) {
-        sentry.commit();
-        throw result.o4;
-      }
      sentry.commit();
      return;
    }
@@ -69745,24 +70651,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
+void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req)
 {
-  int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
-  recv_create_table_with_constraints(seqid);
+  int32_t seqid = send_add_primary_key(req);
+  recv_add_primary_key(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_create_table_with_constraints_pargs args;
-  args.tbl = &tbl;
-  args.primaryKeys = &primaryKeys;
-  args.foreignKeys = &foreignKeys;
-  args.uniqueConstraints = &uniqueConstraints;
-  args.notNullConstraints = &notNullConstraints;
+  ThriftHiveMetastore_add_primary_key_pargs args;
+  args.req = &req;
   args.write(oprot_);
 
   oprot_->writeMessageEnd();
@@ -69773,7 +70675,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69802,7 +70704,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("create_table_with_constraints") != 0) {
+      if (fname.compare("add_primary_key") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -69811,7 +70713,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_create_table_with_constraints_presult result;
+      ThriftHiveMetastore_add_primary_key_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -69824,100 +70726,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid)
        sentry.commit();
        throw result.o2;
      }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
-      if (result.__isset.o4) {
-        sentry.commit();
-        throw result.o4;
-      }
-      sentry.commit();
-      return;
-    }
-    // seqid != rseqid
-    this->sync_.updatePending(fname, mtype, rseqid);
-
-    // this will temporarily unlock the readMutex, and let other clients get work done
-    this->sync_.waitForWork(seqid);
-  } // end while(true)
-}
-
-void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req)
-{
-  int32_t seqid = send_drop_constraint(req);
-  recv_drop_constraint(seqid);
-}
-
-int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req)
-{
-  int32_t cseqid = this->sync_.generateSeqId();
-  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
-
-  ThriftHiveMetastore_drop_constraint_pargs args;
-  args.req = &req;
-  args.write(oprot_);
-
-  oprot_->writeMessageEnd();
-  oprot_->getTransport()->writeEnd();
-  oprot_->getTransport()->flush();
-
-  sentry.commit();
-  return cseqid;
-}
-
-void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid)
-{
-
-  int32_t rseqid = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TMessageType mtype;
-
-  // the read mutex gets dropped and reacquired as part of waitForWork()
-  // The destructor of this sentry wakes up other clients
-  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
-
-  while(true) {
-    if(!this->sync_.getPending(fname, mtype, rseqid)) {
-      iprot_->readMessageBegin(fname, mtype, rseqid);
-    }
-    if(seqid == rseqid) {
-      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
-        ::apache::thrift::TApplicationException x;
-        x.read(iprot_);
-        iprot_->readMessageEnd();
-        iprot_->getTransport()->readEnd();
-        sentry.commit();
-        throw x;
-      }
-      if (mtype != ::apache::thrift::protocol::T_REPLY) {
-        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-        iprot_->readMessageEnd();
-        iprot_->getTransport()->readEnd();
-      }
-      if (fname.compare("drop_constraint") != 0) {
-        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-        iprot_->readMessageEnd();
-        iprot_->getTransport()->readEnd();
-
-        // in a bad state, don't commit
-        using ::apache::thrift::protocol::TProtocolException;
-        throw TProtocolException(TProtocolException::INVALID_DATA);
-      }
-      ThriftHiveMetastore_drop_constraint_presult result;
-      result.read(iprot_);
-      iprot_->readMessageEnd();
-      iprot_->getTransport()->readEnd();
-
-      if (result.__isset.o1) {
-        sentry.commit();
-        throw result.o1;
-      }
-      if (result.__isset.o3) {
-        sentry.commit();
-        throw result.o3;
-      }
      sentry.commit();
      return;
    }
@@ -69929,19 +70737,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req)
+void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req)
 {
-  int32_t seqid = send_add_primary_key(req);
-  recv_add_primary_key(seqid);
+  int32_t seqid = send_add_foreign_key(req);
+  recv_add_foreign_key(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_add_primary_key_pargs args;
+  ThriftHiveMetastore_add_foreign_key_pargs args;
   args.req = &req;
   args.write(oprot_);
 
@@ -69953,7 +70761,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -69982,7 +70790,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("add_primary_key") != 0) {
+      if (fname.compare("add_foreign_key") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -69991,7 +70799,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_add_primary_key_presult result;
+      ThriftHiveMetastore_add_foreign_key_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -70015,19 +70823,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req)
+void ThriftHiveMetastoreConcurrentClient::add_unique_constraint(const AddUniqueConstraintRequest& req)
 {
-  int32_t seqid = send_add_foreign_key(req);
-  recv_add_foreign_key(seqid);
+  int32_t seqid = send_add_unique_constraint(req);
+  recv_add_unique_constraint(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_add_foreign_key_pargs args;
+  ThriftHiveMetastore_add_unique_constraint_pargs args;
   args.req = &req;
   args.write(oprot_);
 
@@ -70039,7 +70847,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -70068,7 +70876,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("add_foreign_key") != 0) {
+      if (fname.compare("add_unique_constraint") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -70077,7 +70885,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_add_foreign_key_presult result;
+      ThriftHiveMetastore_add_unique_constraint_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -70101,19 +70909,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::add_unique_constraint(const AddUniqueConstraintRequest& req)
+void ThriftHiveMetastoreConcurrentClient::add_not_null_constraint(const AddNotNullConstraintRequest& req)
 {
-  int32_t seqid = send_add_unique_constraint(req);
-  recv_add_unique_constraint(seqid);
+  int32_t seqid = send_add_not_null_constraint(req);
+  recv_add_not_null_constraint(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("add_unique_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_add_unique_constraint_pargs args;
+  ThriftHiveMetastore_add_not_null_constraint_pargs args;
   args.req = &req;
   args.write(oprot_);
 
@@ -70125,7 +70933,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_unique_constraint(const AddUniqueConstraintRequest& req)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -70154,7 +70962,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("add_unique_constraint") != 0) {
+      if (fname.compare("add_not_null_constraint") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -70163,7 +70971,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_add_unique_constraint_presult result;
+      ThriftHiveMetastore_add_not_null_constraint_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -70187,19 +70995,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_unique_constraint(const int32_t seqid)
   } // end while(true)
 }
 
-void ThriftHiveMetastoreConcurrentClient::add_not_null_constraint(const AddNotNullConstraintRequest& req)
+void ThriftHiveMetastoreConcurrentClient::add_default_constraint(const AddDefaultConstraintRequest& req)
 {
-  int32_t seqid = send_add_not_null_constraint(req);
-  recv_add_not_null_constraint(seqid);
+  int32_t seqid = send_add_default_constraint(req);
+  recv_add_default_constraint(seqid);
 }
 
-int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req)
+int32_t ThriftHiveMetastoreConcurrentClient::send_add_default_constraint(const AddDefaultConstraintRequest& req)
 {
   int32_t cseqid = this->sync_.generateSeqId();
   ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
-  oprot_->writeMessageBegin("add_not_null_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
+  oprot_->writeMessageBegin("add_default_constraint", ::apache::thrift::protocol::T_CALL, cseqid);
 
-  ThriftHiveMetastore_add_not_null_constraint_pargs args;
+  ThriftHiveMetastore_add_default_constraint_pargs args;
   args.req = &req;
   args.write(oprot_);
 
@@ -70211,7 +71019,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_not_null_constraint(const AddNotNullConstraintRequest& req)
   return cseqid;
 }
 
-void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid)
+void ThriftHiveMetastoreConcurrentClient::recv_add_default_constraint(const int32_t seqid)
 {
 
   int32_t rseqid = 0;
@@ -70240,7 +71048,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid)
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
      }
-      if (fname.compare("add_not_null_constraint") != 0) {
+      if (fname.compare("add_default_constraint") != 0) {
        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
        iprot_->readMessageEnd();
        iprot_->getTransport()->readEnd();
@@ -70249,7 +71057,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_not_null_constraint(const int32_t seqid)
        using ::apache::thrift::protocol::TProtocolException;
        throw TProtocolException(TProtocolException::INVALID_DATA);
      }
-      ThriftHiveMetastore_add_not_null_constraint_presult result;
+      ThriftHiveMetastore_add_default_constraint_presult result;
      result.read(iprot_);
      iprot_->readMessageEnd();
      iprot_->getTransport()->readEnd();
@@ -76906,6 +77714,98 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_not_null_constraints(NotNullConstraintsResponse& _return, const int32_t seqid)
   } // end while(true)
 }
 
+void ThriftHiveMetastoreConcurrentClient::get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request)
+{
+  int32_t seqid = send_get_default_constraints(request);
+  recv_get_default_constraints(_return, seqid);
+}
+
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_default_constraints(const DefaultConstraintsRequest& request)
+{
+  int32_t cseqid = this->sync_.generateSeqId();
+  ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_);
+  oprot_->writeMessageBegin("get_default_constraints", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_get_default_constraints_pargs args;
+  args.request = &request;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+
+  sentry.commit();
+  return cseqid;
+}
+
+void ThriftHiveMetastoreConcurrentClient::recv_get_default_constraints(DefaultConstraintsResponse& _return, const int32_t seqid)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  // the read mutex gets dropped and reacquired as part of waitForWork()
+  // The destructor of this sentry wakes up other clients
+  ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid);
+
+  while(true) {
+    if(!this->sync_.getPending(fname, mtype, rseqid)) {
+      iprot_->readMessageBegin(fname, mtype, rseqid);
+    }
+    if(seqid == rseqid) {
+      if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+        ::apache::thrift::TApplicationException x;
+        x.read(iprot_);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+        sentry.commit();
+        throw x;
+      }
+      if (mtype != ::apache::thrift::protocol::T_REPLY) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+      }
+      if (fname.compare("get_default_constraints") != 0) {
+        iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+        iprot_->readMessageEnd();
+        iprot_->getTransport()->readEnd();
+
+        // in a bad state, don't commit
+        using ::apache::thrift::protocol::TProtocolException;
+        throw TProtocolException(TProtocolException::INVALID_DATA);
+      }
+      ThriftHiveMetastore_get_default_constraints_presult result;
+      result.success = &_return;
+      result.read(iprot_);
+      iprot_->readMessageEnd();
+      iprot_->getTransport()->readEnd();
+
+      if (result.__isset.success) {
+        // _return pointer has now been filled
+        sentry.commit();
+        return;
+      }
+      if (result.__isset.o1) {
+        sentry.commit();
+        throw result.o1;
+      }
+      if (result.__isset.o2) {
+        sentry.commit();
+        throw result.o2;
+      }
+      // in a bad state, don't commit
+      throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_default_constraints failed: unknown result");
+    }
+    // seqid != rseqid
+    this->sync_.updatePending(fname, mtype, rseqid);
+
+    // this will temporarily unlock the readMutex, and let other clients get work done
+    this->sync_.waitForWork(seqid);
+  } // end while(true)
+}
+
 bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj)
 {
   int32_t seqid = send_update_table_column_statistics(stats_obj);
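Most of the churn above is mechanical regeneration: the Thrift compiler re-emitted the whole client file after the service IDL changed, so each old stub body appears to "rename" into its successor. The substantive change is the new default-constraint surface: `create_table_with_constraints` gains a sixth `defaultConstraints` parameter, and `add_default_constraint`/`get_default_constraints` become first-class RPCs with the usual send/recv/seqid plumbing. A minimal sketch of driving the regenerated synchronous client is below; the host/port, the smart-pointer alias (boost vs. std depending on the Thrift release), the `Apache::Hadoop::Hive` namespace, and the `DefaultConstraintsRequest`/`SQLDefaultConstraint` field names are assumptions, not taken from this diff.

```cpp
// Hypothetical driver for the regenerated client; names marked "assumed"
// are not confirmed by this diff.
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;  // assumed cpp namespace of the generated code

int main() {
  // Socket -> buffered transport -> binary protocol is the stock Thrift stack.
  // (Older Thrift releases spell these pointers boost::shared_ptr.)
  std::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));  // 9083: conventional metastore port
  std::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  std::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);
  transport->open();

  // The widened call: the sixth vector is the new piece threaded through
  // every layer (If, Null, Client, Processor, Multiface, ConcurrentClient).
  Table tbl;                                   // assume fields populated elsewhere
  std::vector<SQLPrimaryKey> pks;
  std::vector<SQLForeignKey> fks;
  std::vector<SQLUniqueConstraint> uniqs;
  std::vector<SQLNotNullConstraint> notNulls;
  std::vector<SQLDefaultConstraint> defaults;  // e.g. one entry per DEFAULT column
  client.create_table_with_constraints(tbl, pks, fks, uniqs, notNulls, defaults);

  // The new getter follows Thrift's out-parameter convention.
  DefaultConstraintsRequest req;
  req.__set_db_name("default");    // assumed field name
  req.__set_tbl_name("web_logs");  // assumed field name
  DefaultConstraintsResponse resp;
  client.get_default_constraints(resp, req);

  transport->close();
  return 0;
}
```

Failures surface as the mapped exceptions from the generated presult classes: `NoSuchObjectException`/`MetaException` for `add_default_constraint`, and `MetaException`/`NoSuchObjectException` for `get_default_constraints`, per the header declarations that follow.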
void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request) = 0; virtual bool update_table_column_statistics(const ColumnStatistics& stats_obj) = 0; virtual bool update_partition_column_statistics(const ColumnStatistics& stats_obj) = 0; virtual void get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) = 0; @@ -291,7 +293,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void create_table_with_environment_context(const Table& /* tbl */, const EnvironmentContext& /* environment_context */) { return; } - void create_table_with_constraints(const Table& /* tbl */, const std::vector & /* primaryKeys */, const std::vector & /* foreignKeys */, const std::vector & /* uniqueConstraints */, const std::vector & /* notNullConstraints */) { + void create_table_with_constraints(const Table& /* tbl */, const std::vector & /* primaryKeys */, const std::vector & /* foreignKeys */, const std::vector & /* uniqueConstraints */, const std::vector & /* notNullConstraints */, const std::vector & /* defaultConstraints */) { return; } void drop_constraint(const DropConstraintRequest& /* req */) { @@ -309,6 +311,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void add_not_null_constraint(const AddNotNullConstraintRequest& /* req */) { return; } + void add_default_constraint(const AddDefaultConstraintRequest& /* req */) { + return; + } void drop_table(const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */) { return; } @@ -532,6 +537,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_not_null_constraints(NotNullConstraintsResponse& /* _return */, const NotNullConstraintsRequest& /* request */) { return; } + void get_default_constraints(DefaultConstraintsResponse& /* _return */, const DefaultConstraintsRequest& /* request */) { + return; + } bool update_table_column_statistics(const ColumnStatistics& /* stats_obj */) { bool _return = false; return _return; @@ -3043,12 +3051,13 @@ class ThriftHiveMetastore_create_table_with_environment_context_presult { }; typedef struct _ThriftHiveMetastore_create_table_with_constraints_args__isset { - _ThriftHiveMetastore_create_table_with_constraints_args__isset() : tbl(false), primaryKeys(false), foreignKeys(false), uniqueConstraints(false), notNullConstraints(false) {} + _ThriftHiveMetastore_create_table_with_constraints_args__isset() : tbl(false), primaryKeys(false), foreignKeys(false), uniqueConstraints(false), notNullConstraints(false), defaultConstraints(false) {} bool tbl :1; bool primaryKeys :1; bool foreignKeys :1; bool uniqueConstraints :1; bool notNullConstraints :1; + bool defaultConstraints :1; } _ThriftHiveMetastore_create_table_with_constraints_args__isset; class ThriftHiveMetastore_create_table_with_constraints_args { @@ -3065,6 +3074,7 @@ class ThriftHiveMetastore_create_table_with_constraints_args { std::vector foreignKeys; std::vector uniqueConstraints; std::vector notNullConstraints; + std::vector defaultConstraints; _ThriftHiveMetastore_create_table_with_constraints_args__isset __isset; @@ -3078,6 +3088,8 @@ class ThriftHiveMetastore_create_table_with_constraints_args { void __set_notNullConstraints(const std::vector & val); + void __set_defaultConstraints(const std::vector & val); + bool operator == (const ThriftHiveMetastore_create_table_with_constraints_args & rhs) const { 
if (!(tbl == rhs.tbl)) @@ -3090,6 +3102,8 @@ class ThriftHiveMetastore_create_table_with_constraints_args { return false; if (!(notNullConstraints == rhs.notNullConstraints)) return false; + if (!(defaultConstraints == rhs.defaultConstraints)) + return false; return true; } bool operator != (const ThriftHiveMetastore_create_table_with_constraints_args &rhs) const { @@ -3114,6 +3128,7 @@ class ThriftHiveMetastore_create_table_with_constraints_pargs { const std::vector * foreignKeys; const std::vector * uniqueConstraints; const std::vector * notNullConstraints; + const std::vector * defaultConstraints; uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; @@ -3758,6 +3773,118 @@ class ThriftHiveMetastore_add_not_null_constraint_presult { }; +typedef struct _ThriftHiveMetastore_add_default_constraint_args__isset { + _ThriftHiveMetastore_add_default_constraint_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_add_default_constraint_args__isset; + +class ThriftHiveMetastore_add_default_constraint_args { + public: + + ThriftHiveMetastore_add_default_constraint_args(const ThriftHiveMetastore_add_default_constraint_args&); + ThriftHiveMetastore_add_default_constraint_args& operator=(const ThriftHiveMetastore_add_default_constraint_args&); + ThriftHiveMetastore_add_default_constraint_args() { + } + + virtual ~ThriftHiveMetastore_add_default_constraint_args() throw(); + AddDefaultConstraintRequest req; + + _ThriftHiveMetastore_add_default_constraint_args__isset __isset; + + void __set_req(const AddDefaultConstraintRequest& val); + + bool operator == (const ThriftHiveMetastore_add_default_constraint_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_add_default_constraint_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_add_default_constraint_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_add_default_constraint_pargs { + public: + + + virtual ~ThriftHiveMetastore_add_default_constraint_pargs() throw(); + const AddDefaultConstraintRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_add_default_constraint_result__isset { + _ThriftHiveMetastore_add_default_constraint_result__isset() : o1(false), o2(false) {} + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_add_default_constraint_result__isset; + +class ThriftHiveMetastore_add_default_constraint_result { + public: + + ThriftHiveMetastore_add_default_constraint_result(const ThriftHiveMetastore_add_default_constraint_result&); + ThriftHiveMetastore_add_default_constraint_result& operator=(const ThriftHiveMetastore_add_default_constraint_result&); + ThriftHiveMetastore_add_default_constraint_result() { + } + + virtual ~ThriftHiveMetastore_add_default_constraint_result() throw(); + NoSuchObjectException o1; + MetaException o2; + + _ThriftHiveMetastore_add_default_constraint_result__isset __isset; + + void __set_o1(const NoSuchObjectException& val); + + void __set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_add_default_constraint_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_add_default_constraint_result &rhs) const { + return !(*this 
== rhs); + } + + bool operator < (const ThriftHiveMetastore_add_default_constraint_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_add_default_constraint_presult__isset { + _ThriftHiveMetastore_add_default_constraint_presult__isset() : o1(false), o2(false) {} + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_add_default_constraint_presult__isset; + +class ThriftHiveMetastore_add_default_constraint_presult { + public: + + + virtual ~ThriftHiveMetastore_add_default_constraint_presult() throw(); + NoSuchObjectException o1; + MetaException o2; + + _ThriftHiveMetastore_add_default_constraint_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_drop_table_args__isset { _ThriftHiveMetastore_drop_table_args__isset() : dbname(false), name(false), deleteData(false) {} bool dbname :1; @@ -13201,6 +13328,126 @@ class ThriftHiveMetastore_get_not_null_constraints_presult { }; +typedef struct _ThriftHiveMetastore_get_default_constraints_args__isset { + _ThriftHiveMetastore_get_default_constraints_args__isset() : request(false) {} + bool request :1; +} _ThriftHiveMetastore_get_default_constraints_args__isset; + +class ThriftHiveMetastore_get_default_constraints_args { + public: + + ThriftHiveMetastore_get_default_constraints_args(const ThriftHiveMetastore_get_default_constraints_args&); + ThriftHiveMetastore_get_default_constraints_args& operator=(const ThriftHiveMetastore_get_default_constraints_args&); + ThriftHiveMetastore_get_default_constraints_args() { + } + + virtual ~ThriftHiveMetastore_get_default_constraints_args() throw(); + DefaultConstraintsRequest request; + + _ThriftHiveMetastore_get_default_constraints_args__isset __isset; + + void __set_request(const DefaultConstraintsRequest& val); + + bool operator == (const ThriftHiveMetastore_get_default_constraints_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_default_constraints_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_default_constraints_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_default_constraints_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_default_constraints_pargs() throw(); + const DefaultConstraintsRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_default_constraints_result__isset { + _ThriftHiveMetastore_get_default_constraints_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_default_constraints_result__isset; + +class ThriftHiveMetastore_get_default_constraints_result { + public: + + ThriftHiveMetastore_get_default_constraints_result(const ThriftHiveMetastore_get_default_constraints_result&); + ThriftHiveMetastore_get_default_constraints_result& operator=(const ThriftHiveMetastore_get_default_constraints_result&); + ThriftHiveMetastore_get_default_constraints_result() { + } + + virtual ~ThriftHiveMetastore_get_default_constraints_result() throw(); + DefaultConstraintsResponse success; + MetaException o1; + NoSuchObjectException o2; + 
+ _ThriftHiveMetastore_get_default_constraints_result__isset __isset; + + void __set_success(const DefaultConstraintsResponse& val); + + void __set_o1(const MetaException& val); + + void __set_o2(const NoSuchObjectException& val); + + bool operator == (const ThriftHiveMetastore_get_default_constraints_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_default_constraints_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_default_constraints_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_default_constraints_presult__isset { + _ThriftHiveMetastore_get_default_constraints_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_default_constraints_presult__isset; + +class ThriftHiveMetastore_get_default_constraints_presult { + public: + + + virtual ~ThriftHiveMetastore_get_default_constraints_presult() throw(); + DefaultConstraintsResponse* success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_default_constraints_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_update_table_column_statistics_args__isset { _ThriftHiveMetastore_update_table_column_statistics_args__isset() : stats_obj(false) {} bool stats_obj :1; @@ -23619,8 +23866,8 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context); void recv_create_table_with_environment_context(); - void create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); - void send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints); + void create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints, const std::vector & defaultConstraints); + void send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys, const std::vector & uniqueConstraints, const std::vector & notNullConstraints, const std::vector & defaultConstraints); void recv_create_table_with_constraints(); void drop_constraint(const DropConstraintRequest& req); void send_drop_constraint(const DropConstraintRequest& req); @@ -23637,6 +23884,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void add_not_null_constraint(const AddNotNullConstraintRequest& req); void send_add_not_null_constraint(const AddNotNullConstraintRequest& req); void recv_add_not_null_constraint(); + void add_default_constraint(const AddDefaultConstraintRequest& req); + void send_add_default_constraint(const AddDefaultConstraintRequest& 
req); + void recv_add_default_constraint(); void drop_table(const std::string& dbname, const std::string& name, const bool deleteData); void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData); void recv_drop_table(); @@ -23850,6 +24100,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request); void send_get_not_null_constraints(const NotNullConstraintsRequest& request); void recv_get_not_null_constraints(NotNullConstraintsResponse& _return); + void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request); + void send_get_default_constraints(const DefaultConstraintsRequest& request); + void recv_get_default_constraints(DefaultConstraintsResponse& _return); bool update_table_column_statistics(const ColumnStatistics& stats_obj); void send_update_table_column_statistics(const ColumnStatistics& stats_obj); bool recv_update_table_column_statistics(); @@ -24151,6 +24404,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_add_foreign_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_add_unique_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_add_not_null_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_add_default_constraint(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_table_with_environment_context(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_truncate_table(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -24222,6 +24476,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_foreign_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_unique_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_not_null_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_default_constraints(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_update_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_update_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* 
oprot, void* callContext);
@@ -24339,6 +24594,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
     processMap_["add_foreign_key"] = &ThriftHiveMetastoreProcessor::process_add_foreign_key;
     processMap_["add_unique_constraint"] = &ThriftHiveMetastoreProcessor::process_add_unique_constraint;
     processMap_["add_not_null_constraint"] = &ThriftHiveMetastoreProcessor::process_add_not_null_constraint;
+    processMap_["add_default_constraint"] = &ThriftHiveMetastoreProcessor::process_add_default_constraint;
     processMap_["drop_table"] = &ThriftHiveMetastoreProcessor::process_drop_table;
     processMap_["drop_table_with_environment_context"] = &ThriftHiveMetastoreProcessor::process_drop_table_with_environment_context;
     processMap_["truncate_table"] = &ThriftHiveMetastoreProcessor::process_truncate_table;
@@ -24410,6 +24666,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP
     processMap_["get_foreign_keys"] = &ThriftHiveMetastoreProcessor::process_get_foreign_keys;
     processMap_["get_unique_constraints"] = &ThriftHiveMetastoreProcessor::process_get_unique_constraints;
     processMap_["get_not_null_constraints"] = &ThriftHiveMetastoreProcessor::process_get_not_null_constraints;
+    processMap_["get_default_constraints"] = &ThriftHiveMetastoreProcessor::process_get_default_constraints;
     processMap_["update_table_column_statistics"] = &ThriftHiveMetastoreProcessor::process_update_table_column_statistics;
     processMap_["update_partition_column_statistics"] = &ThriftHiveMetastoreProcessor::process_update_partition_column_statistics;
     processMap_["get_table_column_statistics"] = &ThriftHiveMetastoreProcessor::process_get_table_column_statistics;
@@ -24704,13 +24961,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     ifaces_[i]->create_table_with_environment_context(tbl, environment_context);
   }

-  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints) {
+  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints) {
     size_t sz = ifaces_.size();
     size_t i = 0;
     for (; i < (sz - 1); ++i) {
-      ifaces_[i]->create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
+      ifaces_[i]->create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints);
     }
-    ifaces_[i]->create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints);
+    ifaces_[i]->create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints);
   }

   void drop_constraint(const DropConstraintRequest& req) {
@@ -24758,6 +25015,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     ifaces_[i]->add_not_null_constraint(req);
   }

+  void add_default_constraint(const AddDefaultConstraintRequest& req) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->add_default_constraint(req);
+    }
+    ifaces_[i]->add_default_constraint(req);
+  }
+
   void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -25444,6 +25710,16 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     return;
   }

+  void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_default_constraints(_return, request);
+    }
+    ifaces_[i]->get_default_constraints(_return, request);
+    return;
+  }
+
   bool update_table_column_statistics(const ColumnStatistics& stats_obj) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -26375,8 +26651,8 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
   int32_t send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
   void recv_create_table_with_environment_context(const int32_t seqid);
-  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
-  int32_t send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
+  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints);
+  int32_t send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints);
   void recv_create_table_with_constraints(const int32_t seqid);
   void drop_constraint(const DropConstraintRequest& req);
   int32_t send_drop_constraint(const DropConstraintRequest& req);
@@ -26393,6 +26669,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void add_not_null_constraint(const AddNotNullConstraintRequest& req);
   int32_t send_add_not_null_constraint(const AddNotNullConstraintRequest& req);
   void recv_add_not_null_constraint(const int32_t seqid);
+  void add_default_constraint(const AddDefaultConstraintRequest& req);
+  int32_t send_add_default_constraint(const AddDefaultConstraintRequest& req);
+  void recv_add_default_constraint(const int32_t seqid);
   void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
   int32_t send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
   void recv_drop_table(const int32_t seqid);
@@ -26606,6 +26885,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request);
   int32_t send_get_not_null_constraints(const NotNullConstraintsRequest& request);
   void recv_get_not_null_constraints(NotNullConstraintsResponse& _return, const int32_t seqid);
+  void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request);
+  int32_t send_get_default_constraints(const DefaultConstraintsRequest& request);
+  void recv_get_default_constraints(DefaultConstraintsResponse& _return, const int32_t seqid);
   bool update_table_column_statistics(const ColumnStatistics& stats_obj);
   int32_t send_update_table_column_statistics(const ColumnStatistics& stats_obj);
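The new surface is easiest to see from the caller's side. Below is a minimal, hypothetical sketch (not part of the patch) of driving the two new RPCs through the generated ThriftHiveMetastoreClient, assuming a metastore Thrift endpoint on localhost:9083, the generated Apache::Hadoop::Hive namespace, and a Thrift 0.9.x-era build where the generated C++ uses boost::shared_ptr. The SQLDefaultConstraint setters are exactly the ones added in this patch; the field names on AddDefaultConstraintRequest and DefaultConstraintsRequest (defaultConstraintCols, db_name, tbl_name) are not shown in these hunks and are assumptions.

// Hypothetical client-side sketch; see the assumptions noted above.
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;

int main() {
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);
  transport->open();

  // Build one SQLDefaultConstraint with the generated __set_* mutators.
  SQLDefaultConstraint dc;
  dc.__set_table_db("default");
  dc.__set_table_name("t1");
  dc.__set_column_name("c1");
  dc.__set_default_value("0");
  dc.__set_dc_name("c1_default");
  dc.__set_enable_cstr(true);
  dc.__set_validate_cstr(false);
  dc.__set_rely_cstr(false);

  AddDefaultConstraintRequest add_req;
  std::vector<SQLDefaultConstraint> dcs;
  dcs.push_back(dc);
  add_req.__set_defaultConstraintCols(dcs);  // assumed field name
  client.add_default_constraint(add_req);

  DefaultConstraintsRequest get_req;
  get_req.__set_db_name("default");          // assumed field name
  get_req.__set_tbl_name("t1");              // assumed field name
  DefaultConstraintsResponse resp;
  client.get_default_constraints(resp, get_req);

  transport->close();
  return 0;
}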
  bool recv_update_table_column_statistics(const int32_t seqid);
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index b7326f05fc..16536aa531 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -112,7 +112,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("create_table_with_environment_context\n");
   }

-  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints) {
+  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints) {
     // Your implementation goes here
     printf("create_table_with_constraints\n");
   }
@@ -142,6 +142,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("add_not_null_constraint\n");
   }

+  void add_default_constraint(const AddDefaultConstraintRequest& req) {
+    // Your implementation goes here
+    printf("add_default_constraint\n");
+  }
+
   void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) {
     // Your implementation goes here
     printf("drop_table\n");
@@ -497,6 +502,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("get_not_null_constraints\n");
   }

+  void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request) {
+    // Your implementation goes here
+    printf("get_default_constraints\n");
+  }
+
   bool update_table_column_statistics(const ColumnStatistics& stats_obj) {
     // Your implementation goes here
     printf("update_table_column_statistics\n");
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index ef138e00bd..cdcde51210 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -1446,6 +1446,232 @@ void SQLNotNullConstraint::printTo(std::ostream& out) const {
 }

+SQLDefaultConstraint::~SQLDefaultConstraint() throw() {
+}
+
+
+void SQLDefaultConstraint::__set_table_db(const std::string& val) {
+  this->table_db = val;
+}
+
+void SQLDefaultConstraint::__set_table_name(const std::string& val) {
+  this->table_name = val;
+}
+
+void SQLDefaultConstraint::__set_column_name(const std::string& val) {
+  this->column_name = val;
+}
+
+void SQLDefaultConstraint::__set_default_value(const std::string& val) {
+  this->default_value = val;
+}
+
+void SQLDefaultConstraint::__set_dc_name(const std::string& val) {
+  this->dc_name = val;
+}
+
+void SQLDefaultConstraint::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLDefaultConstraint::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLDefaultConstraint::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+ 
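The regenerated skeleton above only stubs the new methods with printf calls; the rest of the generated SQLDefaultConstraint::read() resumes after this aside, and it is worth noting there how unknown field ids fall through to iprot->skip(ftype), which is what lets peers built from older IDL talk to peers whose structs have grown new fields. A real server would subclass the interface and keep state. Below is an illustrative fragment, not generated code: it overrides only the two new methods, so the class stays abstract (deriving further and implementing the remaining ThriftHiveMetastoreIf methods would make it instantiable), and an in-memory std::map stands in for the metastore's RDBMS-backed ObjectStore. Request/response field names marked "assumed" are not shown in this patch.

// Illustrative handler fragment under the assumptions stated above.
#include <map>
#include <string>
#include <vector>
#include "ThriftHiveMetastore.h"

using namespace Apache::Hadoop::Hive;

class InMemoryConstraintHandler : virtual public ThriftHiveMetastoreIf {
  // keyed by "db.table"; stands in for the metastore's backing database
  std::map<std::string, std::vector<SQLDefaultConstraint> > store_;

 public:
  void add_default_constraint(const AddDefaultConstraintRequest& req) {
    const std::vector<SQLDefaultConstraint>& cols =
        req.defaultConstraintCols;  // assumed field name
    for (size_t i = 0; i < cols.size(); ++i) {
      store_[cols[i].table_db + "." + cols[i].table_name].push_back(cols[i]);
    }
  }

  void get_default_constraints(DefaultConstraintsResponse& _return,
                               const DefaultConstraintsRequest& request) {
    const std::string key =
        request.db_name + "." + request.tbl_name;  // assumed field names
    _return.__set_defaultConstraints(store_[key]);  // assumed field name
  }

  // All other ThriftHiveMetastoreIf methods are intentionally left
  // unimplemented here, so this class remains abstract.
};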
::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_db); + this->__isset.table_db = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->column_name); + this->__isset.column_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->default_value); + this->__isset.default_value = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dc_name); + this->__isset.dc_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->enable_cstr); + this->__isset.enable_cstr = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->validate_cstr); + this->__isset.validate_cstr = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 8: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->rely_cstr); + this->__isset.rely_cstr = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t SQLDefaultConstraint::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("SQLDefaultConstraint"); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->table_db); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->column_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("default_value", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->default_value); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->dc_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeBool(this->enable_cstr); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeBool(this->validate_cstr); + xfer += oprot->writeFieldEnd(); + + xfer += 
oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeBool(this->rely_cstr); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b) { + using ::std::swap; + swap(a.table_db, b.table_db); + swap(a.table_name, b.table_name); + swap(a.column_name, b.column_name); + swap(a.default_value, b.default_value); + swap(a.dc_name, b.dc_name); + swap(a.enable_cstr, b.enable_cstr); + swap(a.validate_cstr, b.validate_cstr); + swap(a.rely_cstr, b.rely_cstr); + swap(a.__isset, b.__isset); +} + +SQLDefaultConstraint::SQLDefaultConstraint(const SQLDefaultConstraint& other12) { + table_db = other12.table_db; + table_name = other12.table_name; + column_name = other12.column_name; + default_value = other12.default_value; + dc_name = other12.dc_name; + enable_cstr = other12.enable_cstr; + validate_cstr = other12.validate_cstr; + rely_cstr = other12.rely_cstr; + __isset = other12.__isset; +} +SQLDefaultConstraint& SQLDefaultConstraint::operator=(const SQLDefaultConstraint& other13) { + table_db = other13.table_db; + table_name = other13.table_name; + column_name = other13.column_name; + default_value = other13.default_value; + dc_name = other13.dc_name; + enable_cstr = other13.enable_cstr; + validate_cstr = other13.validate_cstr; + rely_cstr = other13.rely_cstr; + __isset = other13.__isset; + return *this; +} +void SQLDefaultConstraint::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "SQLDefaultConstraint("; + out << "table_db=" << to_string(table_db); + out << ", " << "table_name=" << to_string(table_name); + out << ", " << "column_name=" << to_string(column_name); + out << ", " << "default_value=" << to_string(default_value); + out << ", " << "dc_name=" << to_string(dc_name); + out << ", " << "enable_cstr=" << to_string(enable_cstr); + out << ", " << "validate_cstr=" << to_string(validate_cstr); + out << ", " << "rely_cstr=" << to_string(rely_cstr); + out << ")"; +} + + Type::~Type() throw() { } @@ -1518,14 +1744,14 @@ uint32_t Type::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fields.clear(); - uint32_t _size12; - ::apache::thrift::protocol::TType _etype15; - xfer += iprot->readListBegin(_etype15, _size12); - this->fields.resize(_size12); - uint32_t _i16; - for (_i16 = 0; _i16 < _size12; ++_i16) + uint32_t _size14; + ::apache::thrift::protocol::TType _etype17; + xfer += iprot->readListBegin(_etype17, _size14); + this->fields.resize(_size14); + uint32_t _i18; + for (_i18 = 0; _i18 < _size14; ++_i18) { - xfer += this->fields[_i16].read(iprot); + xfer += this->fields[_i18].read(iprot); } xfer += iprot->readListEnd(); } @@ -1569,10 +1795,10 @@ uint32_t Type::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fields", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fields.size())); - std::vector ::const_iterator _iter17; - for (_iter17 = this->fields.begin(); _iter17 != this->fields.end(); ++_iter17) + std::vector ::const_iterator _iter19; + for (_iter19 = this->fields.begin(); _iter19 != this->fields.end(); ++_iter19) { - xfer += (*_iter17).write(oprot); + xfer += (*_iter19).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1592,19 +1818,19 @@ void swap(Type &a, Type &b) { swap(a.__isset, b.__isset); } -Type::Type(const 
Type& other18) { - name = other18.name; - type1 = other18.type1; - type2 = other18.type2; - fields = other18.fields; - __isset = other18.__isset; +Type::Type(const Type& other20) { + name = other20.name; + type1 = other20.type1; + type2 = other20.type2; + fields = other20.fields; + __isset = other20.__isset; } -Type& Type::operator=(const Type& other19) { - name = other19.name; - type1 = other19.type1; - type2 = other19.type2; - fields = other19.fields; - __isset = other19.__isset; +Type& Type::operator=(const Type& other21) { + name = other21.name; + type1 = other21.type1; + type2 = other21.type2; + fields = other21.fields; + __isset = other21.__isset; return *this; } void Type::printTo(std::ostream& out) const { @@ -1665,9 +1891,9 @@ uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast20; - xfer += iprot->readI32(ecast20); - this->objectType = (HiveObjectType::type)ecast20; + int32_t ecast22; + xfer += iprot->readI32(ecast22); + this->objectType = (HiveObjectType::type)ecast22; this->__isset.objectType = true; } else { xfer += iprot->skip(ftype); @@ -1693,14 +1919,14 @@ uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partValues.clear(); - uint32_t _size21; - ::apache::thrift::protocol::TType _etype24; - xfer += iprot->readListBegin(_etype24, _size21); - this->partValues.resize(_size21); - uint32_t _i25; - for (_i25 = 0; _i25 < _size21; ++_i25) + uint32_t _size23; + ::apache::thrift::protocol::TType _etype26; + xfer += iprot->readListBegin(_etype26, _size23); + this->partValues.resize(_size23); + uint32_t _i27; + for (_i27 = 0; _i27 < _size23; ++_i27) { - xfer += iprot->readString(this->partValues[_i25]); + xfer += iprot->readString(this->partValues[_i27]); } xfer += iprot->readListEnd(); } @@ -1749,10 +1975,10 @@ uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeFieldBegin("partValues", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partValues.size())); - std::vector ::const_iterator _iter26; - for (_iter26 = this->partValues.begin(); _iter26 != this->partValues.end(); ++_iter26) + std::vector ::const_iterator _iter28; + for (_iter28 = this->partValues.begin(); _iter28 != this->partValues.end(); ++_iter28) { - xfer += oprot->writeString((*_iter26)); + xfer += oprot->writeString((*_iter28)); } xfer += oprot->writeListEnd(); } @@ -1777,21 +2003,21 @@ void swap(HiveObjectRef &a, HiveObjectRef &b) { swap(a.__isset, b.__isset); } -HiveObjectRef::HiveObjectRef(const HiveObjectRef& other27) { - objectType = other27.objectType; - dbName = other27.dbName; - objectName = other27.objectName; - partValues = other27.partValues; - columnName = other27.columnName; - __isset = other27.__isset; -} -HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other28) { - objectType = other28.objectType; - dbName = other28.dbName; - objectName = other28.objectName; - partValues = other28.partValues; - columnName = other28.columnName; - __isset = other28.__isset; +HiveObjectRef::HiveObjectRef(const HiveObjectRef& other29) { + objectType = other29.objectType; + dbName = other29.dbName; + objectName = other29.objectName; + partValues = other29.partValues; + columnName = other29.columnName; + __isset = other29.__isset; +} +HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other30) { + 
objectType = other30.objectType; + dbName = other30.dbName; + objectName = other30.objectName; + partValues = other30.partValues; + columnName = other30.columnName; + __isset = other30.__isset; return *this; } void HiveObjectRef::printTo(std::ostream& out) const { @@ -1877,9 +2103,9 @@ uint32_t PrivilegeGrantInfo::read(::apache::thrift::protocol::TProtocol* iprot) break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast29; - xfer += iprot->readI32(ecast29); - this->grantorType = (PrincipalType::type)ecast29; + int32_t ecast31; + xfer += iprot->readI32(ecast31); + this->grantorType = (PrincipalType::type)ecast31; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -1945,21 +2171,21 @@ void swap(PrivilegeGrantInfo &a, PrivilegeGrantInfo &b) { swap(a.__isset, b.__isset); } -PrivilegeGrantInfo::PrivilegeGrantInfo(const PrivilegeGrantInfo& other30) { - privilege = other30.privilege; - createTime = other30.createTime; - grantor = other30.grantor; - grantorType = other30.grantorType; - grantOption = other30.grantOption; - __isset = other30.__isset; -} -PrivilegeGrantInfo& PrivilegeGrantInfo::operator=(const PrivilegeGrantInfo& other31) { - privilege = other31.privilege; - createTime = other31.createTime; - grantor = other31.grantor; - grantorType = other31.grantorType; - grantOption = other31.grantOption; - __isset = other31.__isset; +PrivilegeGrantInfo::PrivilegeGrantInfo(const PrivilegeGrantInfo& other32) { + privilege = other32.privilege; + createTime = other32.createTime; + grantor = other32.grantor; + grantorType = other32.grantorType; + grantOption = other32.grantOption; + __isset = other32.__isset; +} +PrivilegeGrantInfo& PrivilegeGrantInfo::operator=(const PrivilegeGrantInfo& other33) { + privilege = other33.privilege; + createTime = other33.createTime; + grantor = other33.grantor; + grantorType = other33.grantorType; + grantOption = other33.grantOption; + __isset = other33.__isset; return *this; } void PrivilegeGrantInfo::printTo(std::ostream& out) const { @@ -2033,9 +2259,9 @@ uint32_t HiveObjectPrivilege::read(::apache::thrift::protocol::TProtocol* iprot) break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast32; - xfer += iprot->readI32(ecast32); - this->principalType = (PrincipalType::type)ecast32; + int32_t ecast34; + xfer += iprot->readI32(ecast34); + this->principalType = (PrincipalType::type)ecast34; this->__isset.principalType = true; } else { xfer += iprot->skip(ftype); @@ -2096,19 +2322,19 @@ void swap(HiveObjectPrivilege &a, HiveObjectPrivilege &b) { swap(a.__isset, b.__isset); } -HiveObjectPrivilege::HiveObjectPrivilege(const HiveObjectPrivilege& other33) { - hiveObject = other33.hiveObject; - principalName = other33.principalName; - principalType = other33.principalType; - grantInfo = other33.grantInfo; - __isset = other33.__isset; +HiveObjectPrivilege::HiveObjectPrivilege(const HiveObjectPrivilege& other35) { + hiveObject = other35.hiveObject; + principalName = other35.principalName; + principalType = other35.principalType; + grantInfo = other35.grantInfo; + __isset = other35.__isset; } -HiveObjectPrivilege& HiveObjectPrivilege::operator=(const HiveObjectPrivilege& other34) { - hiveObject = other34.hiveObject; - principalName = other34.principalName; - principalType = other34.principalType; - grantInfo = other34.grantInfo; - __isset = other34.__isset; +HiveObjectPrivilege& HiveObjectPrivilege::operator=(const HiveObjectPrivilege& other36) { + hiveObject = other36.hiveObject; + principalName = 
other36.principalName; + principalType = other36.principalType; + grantInfo = other36.grantInfo; + __isset = other36.__isset; return *this; } void HiveObjectPrivilege::printTo(std::ostream& out) const { @@ -2155,14 +2381,14 @@ uint32_t PrivilegeBag::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->privileges.clear(); - uint32_t _size35; - ::apache::thrift::protocol::TType _etype38; - xfer += iprot->readListBegin(_etype38, _size35); - this->privileges.resize(_size35); - uint32_t _i39; - for (_i39 = 0; _i39 < _size35; ++_i39) + uint32_t _size37; + ::apache::thrift::protocol::TType _etype40; + xfer += iprot->readListBegin(_etype40, _size37); + this->privileges.resize(_size37); + uint32_t _i41; + for (_i41 = 0; _i41 < _size37; ++_i41) { - xfer += this->privileges[_i39].read(iprot); + xfer += this->privileges[_i41].read(iprot); } xfer += iprot->readListEnd(); } @@ -2191,10 +2417,10 @@ uint32_t PrivilegeBag::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->privileges.size())); - std::vector ::const_iterator _iter40; - for (_iter40 = this->privileges.begin(); _iter40 != this->privileges.end(); ++_iter40) + std::vector ::const_iterator _iter42; + for (_iter42 = this->privileges.begin(); _iter42 != this->privileges.end(); ++_iter42) { - xfer += (*_iter40).write(oprot); + xfer += (*_iter42).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2211,13 +2437,13 @@ void swap(PrivilegeBag &a, PrivilegeBag &b) { swap(a.__isset, b.__isset); } -PrivilegeBag::PrivilegeBag(const PrivilegeBag& other41) { - privileges = other41.privileges; - __isset = other41.__isset; +PrivilegeBag::PrivilegeBag(const PrivilegeBag& other43) { + privileges = other43.privileges; + __isset = other43.__isset; } -PrivilegeBag& PrivilegeBag::operator=(const PrivilegeBag& other42) { - privileges = other42.privileges; - __isset = other42.__isset; +PrivilegeBag& PrivilegeBag::operator=(const PrivilegeBag& other44) { + privileges = other44.privileges; + __isset = other44.__isset; return *this; } void PrivilegeBag::printTo(std::ostream& out) const { @@ -2269,26 +2495,26 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->userPrivileges.clear(); - uint32_t _size43; - ::apache::thrift::protocol::TType _ktype44; - ::apache::thrift::protocol::TType _vtype45; - xfer += iprot->readMapBegin(_ktype44, _vtype45, _size43); - uint32_t _i47; - for (_i47 = 0; _i47 < _size43; ++_i47) + uint32_t _size45; + ::apache::thrift::protocol::TType _ktype46; + ::apache::thrift::protocol::TType _vtype47; + xfer += iprot->readMapBegin(_ktype46, _vtype47, _size45); + uint32_t _i49; + for (_i49 = 0; _i49 < _size45; ++_i49) { - std::string _key48; - xfer += iprot->readString(_key48); - std::vector & _val49 = this->userPrivileges[_key48]; + std::string _key50; + xfer += iprot->readString(_key50); + std::vector & _val51 = this->userPrivileges[_key50]; { - _val49.clear(); - uint32_t _size50; - ::apache::thrift::protocol::TType _etype53; - xfer += iprot->readListBegin(_etype53, _size50); - _val49.resize(_size50); - uint32_t _i54; - for (_i54 = 0; _i54 < _size50; ++_i54) + _val51.clear(); + uint32_t _size52; + ::apache::thrift::protocol::TType _etype55; + xfer += iprot->readListBegin(_etype55, _size52); + _val51.resize(_size52); + uint32_t 
_i56; + for (_i56 = 0; _i56 < _size52; ++_i56) { - xfer += _val49[_i54].read(iprot); + xfer += _val51[_i56].read(iprot); } xfer += iprot->readListEnd(); } @@ -2304,26 +2530,26 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->groupPrivileges.clear(); - uint32_t _size55; - ::apache::thrift::protocol::TType _ktype56; - ::apache::thrift::protocol::TType _vtype57; - xfer += iprot->readMapBegin(_ktype56, _vtype57, _size55); - uint32_t _i59; - for (_i59 = 0; _i59 < _size55; ++_i59) + uint32_t _size57; + ::apache::thrift::protocol::TType _ktype58; + ::apache::thrift::protocol::TType _vtype59; + xfer += iprot->readMapBegin(_ktype58, _vtype59, _size57); + uint32_t _i61; + for (_i61 = 0; _i61 < _size57; ++_i61) { - std::string _key60; - xfer += iprot->readString(_key60); - std::vector & _val61 = this->groupPrivileges[_key60]; + std::string _key62; + xfer += iprot->readString(_key62); + std::vector & _val63 = this->groupPrivileges[_key62]; { - _val61.clear(); - uint32_t _size62; - ::apache::thrift::protocol::TType _etype65; - xfer += iprot->readListBegin(_etype65, _size62); - _val61.resize(_size62); - uint32_t _i66; - for (_i66 = 0; _i66 < _size62; ++_i66) + _val63.clear(); + uint32_t _size64; + ::apache::thrift::protocol::TType _etype67; + xfer += iprot->readListBegin(_etype67, _size64); + _val63.resize(_size64); + uint32_t _i68; + for (_i68 = 0; _i68 < _size64; ++_i68) { - xfer += _val61[_i66].read(iprot); + xfer += _val63[_i68].read(iprot); } xfer += iprot->readListEnd(); } @@ -2339,26 +2565,26 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->rolePrivileges.clear(); - uint32_t _size67; - ::apache::thrift::protocol::TType _ktype68; - ::apache::thrift::protocol::TType _vtype69; - xfer += iprot->readMapBegin(_ktype68, _vtype69, _size67); - uint32_t _i71; - for (_i71 = 0; _i71 < _size67; ++_i71) + uint32_t _size69; + ::apache::thrift::protocol::TType _ktype70; + ::apache::thrift::protocol::TType _vtype71; + xfer += iprot->readMapBegin(_ktype70, _vtype71, _size69); + uint32_t _i73; + for (_i73 = 0; _i73 < _size69; ++_i73) { - std::string _key72; - xfer += iprot->readString(_key72); - std::vector & _val73 = this->rolePrivileges[_key72]; + std::string _key74; + xfer += iprot->readString(_key74); + std::vector & _val75 = this->rolePrivileges[_key74]; { - _val73.clear(); - uint32_t _size74; - ::apache::thrift::protocol::TType _etype77; - xfer += iprot->readListBegin(_etype77, _size74); - _val73.resize(_size74); - uint32_t _i78; - for (_i78 = 0; _i78 < _size74; ++_i78) + _val75.clear(); + uint32_t _size76; + ::apache::thrift::protocol::TType _etype79; + xfer += iprot->readListBegin(_etype79, _size76); + _val75.resize(_size76); + uint32_t _i80; + for (_i80 = 0; _i80 < _size76; ++_i80) { - xfer += _val73[_i78].read(iprot); + xfer += _val75[_i80].read(iprot); } xfer += iprot->readListEnd(); } @@ -2390,16 +2616,16 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("userPrivileges", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->userPrivileges.size())); - std::map > ::const_iterator _iter79; - for (_iter79 = this->userPrivileges.begin(); _iter79 != this->userPrivileges.end(); ++_iter79) + std::map > ::const_iterator _iter81; + for (_iter81 = 
this->userPrivileges.begin(); _iter81 != this->userPrivileges.end(); ++_iter81) { - xfer += oprot->writeString(_iter79->first); + xfer += oprot->writeString(_iter81->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter79->second.size())); - std::vector ::const_iterator _iter80; - for (_iter80 = _iter79->second.begin(); _iter80 != _iter79->second.end(); ++_iter80) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter81->second.size())); + std::vector ::const_iterator _iter82; + for (_iter82 = _iter81->second.begin(); _iter82 != _iter81->second.end(); ++_iter82) { - xfer += (*_iter80).write(oprot); + xfer += (*_iter82).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2411,16 +2637,16 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("groupPrivileges", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->groupPrivileges.size())); - std::map > ::const_iterator _iter81; - for (_iter81 = this->groupPrivileges.begin(); _iter81 != this->groupPrivileges.end(); ++_iter81) + std::map > ::const_iterator _iter83; + for (_iter83 = this->groupPrivileges.begin(); _iter83 != this->groupPrivileges.end(); ++_iter83) { - xfer += oprot->writeString(_iter81->first); + xfer += oprot->writeString(_iter83->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter81->second.size())); - std::vector ::const_iterator _iter82; - for (_iter82 = _iter81->second.begin(); _iter82 != _iter81->second.end(); ++_iter82) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter83->second.size())); + std::vector ::const_iterator _iter84; + for (_iter84 = _iter83->second.begin(); _iter84 != _iter83->second.end(); ++_iter84) { - xfer += (*_iter82).write(oprot); + xfer += (*_iter84).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2432,16 +2658,16 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("rolePrivileges", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->rolePrivileges.size())); - std::map > ::const_iterator _iter83; - for (_iter83 = this->rolePrivileges.begin(); _iter83 != this->rolePrivileges.end(); ++_iter83) + std::map > ::const_iterator _iter85; + for (_iter85 = this->rolePrivileges.begin(); _iter85 != this->rolePrivileges.end(); ++_iter85) { - xfer += oprot->writeString(_iter83->first); + xfer += oprot->writeString(_iter85->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter83->second.size())); - std::vector ::const_iterator _iter84; - for (_iter84 = _iter83->second.begin(); _iter84 != _iter83->second.end(); ++_iter84) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter85->second.size())); + std::vector ::const_iterator _iter86; + for (_iter86 = _iter85->second.begin(); _iter86 != _iter85->second.end(); ++_iter86) { - xfer += (*_iter84).write(oprot); + xfer += (*_iter86).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2463,17 +2689,17 @@ void swap(PrincipalPrivilegeSet &a, PrincipalPrivilegeSet &b) { swap(a.__isset, b.__isset); } -PrincipalPrivilegeSet::PrincipalPrivilegeSet(const PrincipalPrivilegeSet& other85) { - userPrivileges = 
other85.userPrivileges; - groupPrivileges = other85.groupPrivileges; - rolePrivileges = other85.rolePrivileges; - __isset = other85.__isset; +PrincipalPrivilegeSet::PrincipalPrivilegeSet(const PrincipalPrivilegeSet& other87) { + userPrivileges = other87.userPrivileges; + groupPrivileges = other87.groupPrivileges; + rolePrivileges = other87.rolePrivileges; + __isset = other87.__isset; } -PrincipalPrivilegeSet& PrincipalPrivilegeSet::operator=(const PrincipalPrivilegeSet& other86) { - userPrivileges = other86.userPrivileges; - groupPrivileges = other86.groupPrivileges; - rolePrivileges = other86.rolePrivileges; - __isset = other86.__isset; +PrincipalPrivilegeSet& PrincipalPrivilegeSet::operator=(const PrincipalPrivilegeSet& other88) { + userPrivileges = other88.userPrivileges; + groupPrivileges = other88.groupPrivileges; + rolePrivileges = other88.rolePrivileges; + __isset = other88.__isset; return *this; } void PrincipalPrivilegeSet::printTo(std::ostream& out) const { @@ -2526,9 +2752,9 @@ uint32_t GrantRevokePrivilegeRequest::read(::apache::thrift::protocol::TProtocol { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast87; - xfer += iprot->readI32(ecast87); - this->requestType = (GrantRevokeType::type)ecast87; + int32_t ecast89; + xfer += iprot->readI32(ecast89); + this->requestType = (GrantRevokeType::type)ecast89; this->__isset.requestType = true; } else { xfer += iprot->skip(ftype); @@ -2593,17 +2819,17 @@ void swap(GrantRevokePrivilegeRequest &a, GrantRevokePrivilegeRequest &b) { swap(a.__isset, b.__isset); } -GrantRevokePrivilegeRequest::GrantRevokePrivilegeRequest(const GrantRevokePrivilegeRequest& other88) { - requestType = other88.requestType; - privileges = other88.privileges; - revokeGrantOption = other88.revokeGrantOption; - __isset = other88.__isset; +GrantRevokePrivilegeRequest::GrantRevokePrivilegeRequest(const GrantRevokePrivilegeRequest& other90) { + requestType = other90.requestType; + privileges = other90.privileges; + revokeGrantOption = other90.revokeGrantOption; + __isset = other90.__isset; } -GrantRevokePrivilegeRequest& GrantRevokePrivilegeRequest::operator=(const GrantRevokePrivilegeRequest& other89) { - requestType = other89.requestType; - privileges = other89.privileges; - revokeGrantOption = other89.revokeGrantOption; - __isset = other89.__isset; +GrantRevokePrivilegeRequest& GrantRevokePrivilegeRequest::operator=(const GrantRevokePrivilegeRequest& other91) { + requestType = other91.requestType; + privileges = other91.privileges; + revokeGrantOption = other91.revokeGrantOption; + __isset = other91.__isset; return *this; } void GrantRevokePrivilegeRequest::printTo(std::ostream& out) const { @@ -2687,13 +2913,13 @@ void swap(GrantRevokePrivilegeResponse &a, GrantRevokePrivilegeResponse &b) { swap(a.__isset, b.__isset); } -GrantRevokePrivilegeResponse::GrantRevokePrivilegeResponse(const GrantRevokePrivilegeResponse& other90) { - success = other90.success; - __isset = other90.__isset; +GrantRevokePrivilegeResponse::GrantRevokePrivilegeResponse(const GrantRevokePrivilegeResponse& other92) { + success = other92.success; + __isset = other92.__isset; } -GrantRevokePrivilegeResponse& GrantRevokePrivilegeResponse::operator=(const GrantRevokePrivilegeResponse& other91) { - success = other91.success; - __isset = other91.__isset; +GrantRevokePrivilegeResponse& GrantRevokePrivilegeResponse::operator=(const GrantRevokePrivilegeResponse& other93) { + success = other93.success; + __isset = other93.__isset; return *this; } void 
GrantRevokePrivilegeResponse::printTo(std::ostream& out) const { @@ -2807,17 +3033,17 @@ void swap(Role &a, Role &b) { swap(a.__isset, b.__isset); } -Role::Role(const Role& other92) { - roleName = other92.roleName; - createTime = other92.createTime; - ownerName = other92.ownerName; - __isset = other92.__isset; +Role::Role(const Role& other94) { + roleName = other94.roleName; + createTime = other94.createTime; + ownerName = other94.ownerName; + __isset = other94.__isset; } -Role& Role::operator=(const Role& other93) { - roleName = other93.roleName; - createTime = other93.createTime; - ownerName = other93.ownerName; - __isset = other93.__isset; +Role& Role::operator=(const Role& other95) { + roleName = other95.roleName; + createTime = other95.createTime; + ownerName = other95.ownerName; + __isset = other95.__isset; return *this; } void Role::printTo(std::ostream& out) const { @@ -2901,9 +3127,9 @@ uint32_t RolePrincipalGrant::read(::apache::thrift::protocol::TProtocol* iprot) break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast94; - xfer += iprot->readI32(ecast94); - this->principalType = (PrincipalType::type)ecast94; + int32_t ecast96; + xfer += iprot->readI32(ecast96); + this->principalType = (PrincipalType::type)ecast96; this->__isset.principalType = true; } else { xfer += iprot->skip(ftype); @@ -2935,9 +3161,9 @@ uint32_t RolePrincipalGrant::read(::apache::thrift::protocol::TProtocol* iprot) break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast95; - xfer += iprot->readI32(ecast95); - this->grantorPrincipalType = (PrincipalType::type)ecast95; + int32_t ecast97; + xfer += iprot->readI32(ecast97); + this->grantorPrincipalType = (PrincipalType::type)ecast97; this->__isset.grantorPrincipalType = true; } else { xfer += iprot->skip(ftype); @@ -3005,25 +3231,25 @@ void swap(RolePrincipalGrant &a, RolePrincipalGrant &b) { swap(a.__isset, b.__isset); } -RolePrincipalGrant::RolePrincipalGrant(const RolePrincipalGrant& other96) { - roleName = other96.roleName; - principalName = other96.principalName; - principalType = other96.principalType; - grantOption = other96.grantOption; - grantTime = other96.grantTime; - grantorName = other96.grantorName; - grantorPrincipalType = other96.grantorPrincipalType; - __isset = other96.__isset; -} -RolePrincipalGrant& RolePrincipalGrant::operator=(const RolePrincipalGrant& other97) { - roleName = other97.roleName; - principalName = other97.principalName; - principalType = other97.principalType; - grantOption = other97.grantOption; - grantTime = other97.grantTime; - grantorName = other97.grantorName; - grantorPrincipalType = other97.grantorPrincipalType; - __isset = other97.__isset; +RolePrincipalGrant::RolePrincipalGrant(const RolePrincipalGrant& other98) { + roleName = other98.roleName; + principalName = other98.principalName; + principalType = other98.principalType; + grantOption = other98.grantOption; + grantTime = other98.grantTime; + grantorName = other98.grantorName; + grantorPrincipalType = other98.grantorPrincipalType; + __isset = other98.__isset; +} +RolePrincipalGrant& RolePrincipalGrant::operator=(const RolePrincipalGrant& other99) { + roleName = other99.roleName; + principalName = other99.principalName; + principalType = other99.principalType; + grantOption = other99.grantOption; + grantTime = other99.grantTime; + grantorName = other99.grantorName; + grantorPrincipalType = other99.grantorPrincipalType; + __isset = other99.__isset; return *this; } void RolePrincipalGrant::printTo(std::ostream& out) 
const { @@ -3085,9 +3311,9 @@ uint32_t GetRoleGrantsForPrincipalRequest::read(::apache::thrift::protocol::TPro break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast98; - xfer += iprot->readI32(ecast98); - this->principal_type = (PrincipalType::type)ecast98; + int32_t ecast100; + xfer += iprot->readI32(ecast100); + this->principal_type = (PrincipalType::type)ecast100; isset_principal_type = true; } else { xfer += iprot->skip(ftype); @@ -3133,13 +3359,13 @@ void swap(GetRoleGrantsForPrincipalRequest &a, GetRoleGrantsForPrincipalRequest swap(a.principal_type, b.principal_type); } -GetRoleGrantsForPrincipalRequest::GetRoleGrantsForPrincipalRequest(const GetRoleGrantsForPrincipalRequest& other99) { - principal_name = other99.principal_name; - principal_type = other99.principal_type; +GetRoleGrantsForPrincipalRequest::GetRoleGrantsForPrincipalRequest(const GetRoleGrantsForPrincipalRequest& other101) { + principal_name = other101.principal_name; + principal_type = other101.principal_type; } -GetRoleGrantsForPrincipalRequest& GetRoleGrantsForPrincipalRequest::operator=(const GetRoleGrantsForPrincipalRequest& other100) { - principal_name = other100.principal_name; - principal_type = other100.principal_type; +GetRoleGrantsForPrincipalRequest& GetRoleGrantsForPrincipalRequest::operator=(const GetRoleGrantsForPrincipalRequest& other102) { + principal_name = other102.principal_name; + principal_type = other102.principal_type; return *this; } void GetRoleGrantsForPrincipalRequest::printTo(std::ostream& out) const { @@ -3185,14 +3411,14 @@ uint32_t GetRoleGrantsForPrincipalResponse::read(::apache::thrift::protocol::TPr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->principalGrants.clear(); - uint32_t _size101; - ::apache::thrift::protocol::TType _etype104; - xfer += iprot->readListBegin(_etype104, _size101); - this->principalGrants.resize(_size101); - uint32_t _i105; - for (_i105 = 0; _i105 < _size101; ++_i105) + uint32_t _size103; + ::apache::thrift::protocol::TType _etype106; + xfer += iprot->readListBegin(_etype106, _size103); + this->principalGrants.resize(_size103); + uint32_t _i107; + for (_i107 = 0; _i107 < _size103; ++_i107) { - xfer += this->principalGrants[_i105].read(iprot); + xfer += this->principalGrants[_i107].read(iprot); } xfer += iprot->readListEnd(); } @@ -3223,10 +3449,10 @@ uint32_t GetRoleGrantsForPrincipalResponse::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->principalGrants.size())); - std::vector ::const_iterator _iter106; - for (_iter106 = this->principalGrants.begin(); _iter106 != this->principalGrants.end(); ++_iter106) + std::vector ::const_iterator _iter108; + for (_iter108 = this->principalGrants.begin(); _iter108 != this->principalGrants.end(); ++_iter108) { - xfer += (*_iter106).write(oprot); + xfer += (*_iter108).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3242,11 +3468,11 @@ void swap(GetRoleGrantsForPrincipalResponse &a, GetRoleGrantsForPrincipalRespons swap(a.principalGrants, b.principalGrants); } -GetRoleGrantsForPrincipalResponse::GetRoleGrantsForPrincipalResponse(const GetRoleGrantsForPrincipalResponse& other107) { - principalGrants = other107.principalGrants; +GetRoleGrantsForPrincipalResponse::GetRoleGrantsForPrincipalResponse(const GetRoleGrantsForPrincipalResponse& other109) { + principalGrants = other109.principalGrants; } 
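Everything from the Type hunks above through the end of hive_metastore_types.cpp is mechanical churn rather than behavior change: the Thrift compiler draws the names of its synthesized temporaries (_sizeN, _etypeN, _ktypeN, _vtypeN, _iterN, _keyN, _valN, ecastN, otherN) from a single per-file counter, so inserting SQLDefaultConstraint near the top of the file consumes two otherN values for its copy constructor and assignment operator (other12/other13) and shifts every later temporary up by two; other18 becomes other20, _size101 becomes _size103, and so on. A schematic illustration, not taken from the patch:

// Before the new struct is inserted, the generator might emit:
//   Type::Type(const Type& other18) { name = other18.name; /* ... */ }
// After SQLDefaultConstraint claims other12/other13 earlier in the file,
// the identical logic regenerates with shifted names:
//   Type::Type(const Type& other20) { name = other20.name; /* ... */ }
// Reviewing such a diff therefore reduces to checking the genuinely new
// hunks (the SQLDefaultConstraint block and the RPC plumbing) and
// verifying that the remaining hunks are pure renumbering.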
-GetRoleGrantsForPrincipalResponse& GetRoleGrantsForPrincipalResponse::operator=(const GetRoleGrantsForPrincipalResponse& other108) { - principalGrants = other108.principalGrants; +GetRoleGrantsForPrincipalResponse& GetRoleGrantsForPrincipalResponse::operator=(const GetRoleGrantsForPrincipalResponse& other110) { + principalGrants = other110.principalGrants; return *this; } void GetRoleGrantsForPrincipalResponse::printTo(std::ostream& out) const { @@ -3328,11 +3554,11 @@ void swap(GetPrincipalsInRoleRequest &a, GetPrincipalsInRoleRequest &b) { swap(a.roleName, b.roleName); } -GetPrincipalsInRoleRequest::GetPrincipalsInRoleRequest(const GetPrincipalsInRoleRequest& other109) { - roleName = other109.roleName; +GetPrincipalsInRoleRequest::GetPrincipalsInRoleRequest(const GetPrincipalsInRoleRequest& other111) { + roleName = other111.roleName; } -GetPrincipalsInRoleRequest& GetPrincipalsInRoleRequest::operator=(const GetPrincipalsInRoleRequest& other110) { - roleName = other110.roleName; +GetPrincipalsInRoleRequest& GetPrincipalsInRoleRequest::operator=(const GetPrincipalsInRoleRequest& other112) { + roleName = other112.roleName; return *this; } void GetPrincipalsInRoleRequest::printTo(std::ostream& out) const { @@ -3377,14 +3603,14 @@ uint32_t GetPrincipalsInRoleResponse::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->principalGrants.clear(); - uint32_t _size111; - ::apache::thrift::protocol::TType _etype114; - xfer += iprot->readListBegin(_etype114, _size111); - this->principalGrants.resize(_size111); - uint32_t _i115; - for (_i115 = 0; _i115 < _size111; ++_i115) + uint32_t _size113; + ::apache::thrift::protocol::TType _etype116; + xfer += iprot->readListBegin(_etype116, _size113); + this->principalGrants.resize(_size113); + uint32_t _i117; + for (_i117 = 0; _i117 < _size113; ++_i117) { - xfer += this->principalGrants[_i115].read(iprot); + xfer += this->principalGrants[_i117].read(iprot); } xfer += iprot->readListEnd(); } @@ -3415,10 +3641,10 @@ uint32_t GetPrincipalsInRoleResponse::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->principalGrants.size())); - std::vector ::const_iterator _iter116; - for (_iter116 = this->principalGrants.begin(); _iter116 != this->principalGrants.end(); ++_iter116) + std::vector ::const_iterator _iter118; + for (_iter118 = this->principalGrants.begin(); _iter118 != this->principalGrants.end(); ++_iter118) { - xfer += (*_iter116).write(oprot); + xfer += (*_iter118).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3434,11 +3660,11 @@ void swap(GetPrincipalsInRoleResponse &a, GetPrincipalsInRoleResponse &b) { swap(a.principalGrants, b.principalGrants); } -GetPrincipalsInRoleResponse::GetPrincipalsInRoleResponse(const GetPrincipalsInRoleResponse& other117) { - principalGrants = other117.principalGrants; +GetPrincipalsInRoleResponse::GetPrincipalsInRoleResponse(const GetPrincipalsInRoleResponse& other119) { + principalGrants = other119.principalGrants; } -GetPrincipalsInRoleResponse& GetPrincipalsInRoleResponse::operator=(const GetPrincipalsInRoleResponse& other118) { - principalGrants = other118.principalGrants; +GetPrincipalsInRoleResponse& GetPrincipalsInRoleResponse::operator=(const GetPrincipalsInRoleResponse& other120) { + principalGrants = other120.principalGrants; return *this; } void 
GetPrincipalsInRoleResponse::printTo(std::ostream& out) const { @@ -3507,9 +3733,9 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast119; - xfer += iprot->readI32(ecast119); - this->requestType = (GrantRevokeType::type)ecast119; + int32_t ecast121; + xfer += iprot->readI32(ecast121); + this->requestType = (GrantRevokeType::type)ecast121; this->__isset.requestType = true; } else { xfer += iprot->skip(ftype); @@ -3533,9 +3759,9 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast120; - xfer += iprot->readI32(ecast120); - this->principalType = (PrincipalType::type)ecast120; + int32_t ecast122; + xfer += iprot->readI32(ecast122); + this->principalType = (PrincipalType::type)ecast122; this->__isset.principalType = true; } else { xfer += iprot->skip(ftype); @@ -3551,9 +3777,9 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast121; - xfer += iprot->readI32(ecast121); - this->grantorType = (PrincipalType::type)ecast121; + int32_t ecast123; + xfer += iprot->readI32(ecast123); + this->grantorType = (PrincipalType::type)ecast123; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -3632,25 +3858,25 @@ void swap(GrantRevokeRoleRequest &a, GrantRevokeRoleRequest &b) { swap(a.__isset, b.__isset); } -GrantRevokeRoleRequest::GrantRevokeRoleRequest(const GrantRevokeRoleRequest& other122) { - requestType = other122.requestType; - roleName = other122.roleName; - principalName = other122.principalName; - principalType = other122.principalType; - grantor = other122.grantor; - grantorType = other122.grantorType; - grantOption = other122.grantOption; - __isset = other122.__isset; -} -GrantRevokeRoleRequest& GrantRevokeRoleRequest::operator=(const GrantRevokeRoleRequest& other123) { - requestType = other123.requestType; - roleName = other123.roleName; - principalName = other123.principalName; - principalType = other123.principalType; - grantor = other123.grantor; - grantorType = other123.grantorType; - grantOption = other123.grantOption; - __isset = other123.__isset; +GrantRevokeRoleRequest::GrantRevokeRoleRequest(const GrantRevokeRoleRequest& other124) { + requestType = other124.requestType; + roleName = other124.roleName; + principalName = other124.principalName; + principalType = other124.principalType; + grantor = other124.grantor; + grantorType = other124.grantorType; + grantOption = other124.grantOption; + __isset = other124.__isset; +} +GrantRevokeRoleRequest& GrantRevokeRoleRequest::operator=(const GrantRevokeRoleRequest& other125) { + requestType = other125.requestType; + roleName = other125.roleName; + principalName = other125.principalName; + principalType = other125.principalType; + grantor = other125.grantor; + grantorType = other125.grantorType; + grantOption = other125.grantOption; + __isset = other125.__isset; return *this; } void GrantRevokeRoleRequest::printTo(std::ostream& out) const { @@ -3738,13 +3964,13 @@ void swap(GrantRevokeRoleResponse &a, GrantRevokeRoleResponse &b) { swap(a.__isset, b.__isset); } -GrantRevokeRoleResponse::GrantRevokeRoleResponse(const GrantRevokeRoleResponse& other124) { - success = other124.success; - __isset = other124.__isset; +GrantRevokeRoleResponse::GrantRevokeRoleResponse(const GrantRevokeRoleResponse& other126) 
{ + success = other126.success; + __isset = other126.__isset; } -GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRoleResponse& other125) { - success = other125.success; - __isset = other125.__isset; +GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRoleResponse& other127) { + success = other127.success; + __isset = other127.__isset; return *this; } void GrantRevokeRoleResponse::printTo(std::ostream& out) const { @@ -3839,17 +4065,17 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size126; - ::apache::thrift::protocol::TType _ktype127; - ::apache::thrift::protocol::TType _vtype128; - xfer += iprot->readMapBegin(_ktype127, _vtype128, _size126); - uint32_t _i130; - for (_i130 = 0; _i130 < _size126; ++_i130) + uint32_t _size128; + ::apache::thrift::protocol::TType _ktype129; + ::apache::thrift::protocol::TType _vtype130; + xfer += iprot->readMapBegin(_ktype129, _vtype130, _size128); + uint32_t _i132; + for (_i132 = 0; _i132 < _size128; ++_i132) { - std::string _key131; - xfer += iprot->readString(_key131); - std::string& _val132 = this->parameters[_key131]; - xfer += iprot->readString(_val132); + std::string _key133; + xfer += iprot->readString(_key133); + std::string& _val134 = this->parameters[_key133]; + xfer += iprot->readString(_val134); } xfer += iprot->readMapEnd(); } @@ -3876,9 +4102,9 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast133; - xfer += iprot->readI32(ecast133); - this->ownerType = (PrincipalType::type)ecast133; + int32_t ecast135; + xfer += iprot->readI32(ecast135); + this->ownerType = (PrincipalType::type)ecast135; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -3916,11 +4142,11 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter134; - for (_iter134 = this->parameters.begin(); _iter134 != this->parameters.end(); ++_iter134) + std::map ::const_iterator _iter136; + for (_iter136 = this->parameters.begin(); _iter136 != this->parameters.end(); ++_iter136) { - xfer += oprot->writeString(_iter134->first); - xfer += oprot->writeString(_iter134->second); + xfer += oprot->writeString(_iter136->first); + xfer += oprot->writeString(_iter136->second); } xfer += oprot->writeMapEnd(); } @@ -3958,25 +4184,25 @@ void swap(Database &a, Database &b) { swap(a.__isset, b.__isset); } -Database::Database(const Database& other135) { - name = other135.name; - description = other135.description; - locationUri = other135.locationUri; - parameters = other135.parameters; - privileges = other135.privileges; - ownerName = other135.ownerName; - ownerType = other135.ownerType; - __isset = other135.__isset; -} -Database& Database::operator=(const Database& other136) { - name = other136.name; - description = other136.description; - locationUri = other136.locationUri; - parameters = other136.parameters; - privileges = other136.privileges; - ownerName = other136.ownerName; - ownerType = other136.ownerType; - __isset = other136.__isset; +Database::Database(const Database& other137) { + name = other137.name; + description 
= other137.description; + locationUri = other137.locationUri; + parameters = other137.parameters; + privileges = other137.privileges; + ownerName = other137.ownerName; + ownerType = other137.ownerType; + __isset = other137.__isset; +} +Database& Database::operator=(const Database& other138) { + name = other138.name; + description = other138.description; + locationUri = other138.locationUri; + parameters = other138.parameters; + privileges = other138.privileges; + ownerName = other138.ownerName; + ownerType = other138.ownerType; + __isset = other138.__isset; return *this; } void Database::printTo(std::ostream& out) const { @@ -4050,17 +4276,17 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size137; - ::apache::thrift::protocol::TType _ktype138; - ::apache::thrift::protocol::TType _vtype139; - xfer += iprot->readMapBegin(_ktype138, _vtype139, _size137); - uint32_t _i141; - for (_i141 = 0; _i141 < _size137; ++_i141) + uint32_t _size139; + ::apache::thrift::protocol::TType _ktype140; + ::apache::thrift::protocol::TType _vtype141; + xfer += iprot->readMapBegin(_ktype140, _vtype141, _size139); + uint32_t _i143; + for (_i143 = 0; _i143 < _size139; ++_i143) { - std::string _key142; - xfer += iprot->readString(_key142); - std::string& _val143 = this->parameters[_key142]; - xfer += iprot->readString(_val143); + std::string _key144; + xfer += iprot->readString(_key144); + std::string& _val145 = this->parameters[_key144]; + xfer += iprot->readString(_val145); } xfer += iprot->readMapEnd(); } @@ -4097,11 +4323,11 @@ uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter144; - for (_iter144 = this->parameters.begin(); _iter144 != this->parameters.end(); ++_iter144) + std::map ::const_iterator _iter146; + for (_iter146 = this->parameters.begin(); _iter146 != this->parameters.end(); ++_iter146) { - xfer += oprot->writeString(_iter144->first); - xfer += oprot->writeString(_iter144->second); + xfer += oprot->writeString(_iter146->first); + xfer += oprot->writeString(_iter146->second); } xfer += oprot->writeMapEnd(); } @@ -4120,17 +4346,17 @@ void swap(SerDeInfo &a, SerDeInfo &b) { swap(a.__isset, b.__isset); } -SerDeInfo::SerDeInfo(const SerDeInfo& other145) { - name = other145.name; - serializationLib = other145.serializationLib; - parameters = other145.parameters; - __isset = other145.__isset; +SerDeInfo::SerDeInfo(const SerDeInfo& other147) { + name = other147.name; + serializationLib = other147.serializationLib; + parameters = other147.parameters; + __isset = other147.__isset; } -SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other146) { - name = other146.name; - serializationLib = other146.serializationLib; - parameters = other146.parameters; - __isset = other146.__isset; +SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other148) { + name = other148.name; + serializationLib = other148.serializationLib; + parameters = other148.parameters; + __isset = other148.__isset; return *this; } void SerDeInfo::printTo(std::ostream& out) const { @@ -4229,15 +4455,15 @@ void swap(Order &a, Order &b) { swap(a.__isset, b.__isset); } -Order::Order(const Order& other147) { - col = other147.col; - order = 
other147.order; - __isset = other147.__isset; +Order::Order(const Order& other149) { + col = other149.col; + order = other149.order; + __isset = other149.__isset; } -Order& Order::operator=(const Order& other148) { - col = other148.col; - order = other148.order; - __isset = other148.__isset; +Order& Order::operator=(const Order& other150) { + col = other150.col; + order = other150.order; + __isset = other150.__isset; return *this; } void Order::printTo(std::ostream& out) const { @@ -4290,14 +4516,14 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColNames.clear(); - uint32_t _size149; - ::apache::thrift::protocol::TType _etype152; - xfer += iprot->readListBegin(_etype152, _size149); - this->skewedColNames.resize(_size149); - uint32_t _i153; - for (_i153 = 0; _i153 < _size149; ++_i153) + uint32_t _size151; + ::apache::thrift::protocol::TType _etype154; + xfer += iprot->readListBegin(_etype154, _size151); + this->skewedColNames.resize(_size151); + uint32_t _i155; + for (_i155 = 0; _i155 < _size151; ++_i155) { - xfer += iprot->readString(this->skewedColNames[_i153]); + xfer += iprot->readString(this->skewedColNames[_i155]); } xfer += iprot->readListEnd(); } @@ -4310,23 +4536,23 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColValues.clear(); - uint32_t _size154; - ::apache::thrift::protocol::TType _etype157; - xfer += iprot->readListBegin(_etype157, _size154); - this->skewedColValues.resize(_size154); - uint32_t _i158; - for (_i158 = 0; _i158 < _size154; ++_i158) + uint32_t _size156; + ::apache::thrift::protocol::TType _etype159; + xfer += iprot->readListBegin(_etype159, _size156); + this->skewedColValues.resize(_size156); + uint32_t _i160; + for (_i160 = 0; _i160 < _size156; ++_i160) { { - this->skewedColValues[_i158].clear(); - uint32_t _size159; - ::apache::thrift::protocol::TType _etype162; - xfer += iprot->readListBegin(_etype162, _size159); - this->skewedColValues[_i158].resize(_size159); - uint32_t _i163; - for (_i163 = 0; _i163 < _size159; ++_i163) + this->skewedColValues[_i160].clear(); + uint32_t _size161; + ::apache::thrift::protocol::TType _etype164; + xfer += iprot->readListBegin(_etype164, _size161); + this->skewedColValues[_i160].resize(_size161); + uint32_t _i165; + for (_i165 = 0; _i165 < _size161; ++_i165) { - xfer += iprot->readString(this->skewedColValues[_i158][_i163]); + xfer += iprot->readString(this->skewedColValues[_i160][_i165]); } xfer += iprot->readListEnd(); } @@ -4342,29 +4568,29 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->skewedColValueLocationMaps.clear(); - uint32_t _size164; - ::apache::thrift::protocol::TType _ktype165; - ::apache::thrift::protocol::TType _vtype166; - xfer += iprot->readMapBegin(_ktype165, _vtype166, _size164); - uint32_t _i168; - for (_i168 = 0; _i168 < _size164; ++_i168) + uint32_t _size166; + ::apache::thrift::protocol::TType _ktype167; + ::apache::thrift::protocol::TType _vtype168; + xfer += iprot->readMapBegin(_ktype167, _vtype168, _size166); + uint32_t _i170; + for (_i170 = 0; _i170 < _size166; ++_i170) { - std::vector _key169; + std::vector _key171; { - _key169.clear(); - uint32_t _size171; - ::apache::thrift::protocol::TType _etype174; - xfer += iprot->readListBegin(_etype174, _size171); - _key169.resize(_size171); - uint32_t _i175; - for (_i175 = 0; _i175 < 
_size171; ++_i175) + _key171.clear(); + uint32_t _size173; + ::apache::thrift::protocol::TType _etype176; + xfer += iprot->readListBegin(_etype176, _size173); + _key171.resize(_size173); + uint32_t _i177; + for (_i177 = 0; _i177 < _size173; ++_i177) { - xfer += iprot->readString(_key169[_i175]); + xfer += iprot->readString(_key171[_i177]); } xfer += iprot->readListEnd(); } - std::string& _val170 = this->skewedColValueLocationMaps[_key169]; - xfer += iprot->readString(_val170); + std::string& _val172 = this->skewedColValueLocationMaps[_key171]; + xfer += iprot->readString(_val172); } xfer += iprot->readMapEnd(); } @@ -4393,10 +4619,10 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->skewedColNames.size())); - std::vector ::const_iterator _iter176; - for (_iter176 = this->skewedColNames.begin(); _iter176 != this->skewedColNames.end(); ++_iter176) + std::vector ::const_iterator _iter178; + for (_iter178 = this->skewedColNames.begin(); _iter178 != this->skewedColNames.end(); ++_iter178) { - xfer += oprot->writeString((*_iter176)); + xfer += oprot->writeString((*_iter178)); } xfer += oprot->writeListEnd(); } @@ -4405,15 +4631,15 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast(this->skewedColValues.size())); - std::vector > ::const_iterator _iter177; - for (_iter177 = this->skewedColValues.begin(); _iter177 != this->skewedColValues.end(); ++_iter177) + std::vector > ::const_iterator _iter179; + for (_iter179 = this->skewedColValues.begin(); _iter179 != this->skewedColValues.end(); ++_iter179) { { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*_iter177).size())); - std::vector ::const_iterator _iter178; - for (_iter178 = (*_iter177).begin(); _iter178 != (*_iter177).end(); ++_iter178) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*_iter179).size())); + std::vector ::const_iterator _iter180; + for (_iter180 = (*_iter179).begin(); _iter180 != (*_iter179).end(); ++_iter180) { - xfer += oprot->writeString((*_iter178)); + xfer += oprot->writeString((*_iter180)); } xfer += oprot->writeListEnd(); } @@ -4425,19 +4651,19 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast(this->skewedColValueLocationMaps.size())); - std::map , std::string> ::const_iterator _iter179; - for (_iter179 = this->skewedColValueLocationMaps.begin(); _iter179 != this->skewedColValueLocationMaps.end(); ++_iter179) + std::map , std::string> ::const_iterator _iter181; + for (_iter181 = this->skewedColValueLocationMaps.begin(); _iter181 != this->skewedColValueLocationMaps.end(); ++_iter181) { { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(_iter179->first.size())); - std::vector ::const_iterator _iter180; - for (_iter180 = _iter179->first.begin(); _iter180 != _iter179->first.end(); ++_iter180) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(_iter181->first.size())); + std::vector ::const_iterator _iter182; + for (_iter182 = _iter181->first.begin(); _iter182 != _iter181->first.end(); ++_iter182) { - xfer += oprot->writeString((*_iter180)); + xfer += oprot->writeString((*_iter182)); } xfer += oprot->writeListEnd(); } - xfer += oprot->writeString(_iter179->second); + xfer += oprot->writeString(_iter181->second); } xfer += oprot->writeMapEnd(); } @@ -4456,17 +4682,17 @@ void swap(SkewedInfo &a, SkewedInfo &b) { swap(a.__isset, b.__isset); } -SkewedInfo::SkewedInfo(const SkewedInfo& other181) { - skewedColNames = other181.skewedColNames; - skewedColValues = other181.skewedColValues; - skewedColValueLocationMaps = other181.skewedColValueLocationMaps; - __isset = other181.__isset; +SkewedInfo::SkewedInfo(const SkewedInfo& other183) { + skewedColNames = other183.skewedColNames; + skewedColValues = other183.skewedColValues; + skewedColValueLocationMaps = other183.skewedColValueLocationMaps; + __isset = other183.__isset; } -SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other182) { - skewedColNames = other182.skewedColNames; - skewedColValues = other182.skewedColValues; - skewedColValueLocationMaps = other182.skewedColValueLocationMaps; - __isset = other182.__isset; +SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other184) { + skewedColNames = other184.skewedColNames; + skewedColValues = other184.skewedColValues; + skewedColValueLocationMaps = other184.skewedColValueLocationMaps; + __isset = other184.__isset; return *this; } void SkewedInfo::printTo(std::ostream& out) const { @@ -4558,14 +4784,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size183; - ::apache::thrift::protocol::TType _etype186; - xfer += iprot->readListBegin(_etype186, _size183); - this->cols.resize(_size183); - uint32_t _i187; - for (_i187 = 0; _i187 < _size183; ++_i187) + uint32_t _size185; + ::apache::thrift::protocol::TType _etype188; + xfer += iprot->readListBegin(_etype188, _size185); + this->cols.resize(_size185); + uint32_t _i189; + for (_i189 = 0; _i189 < _size185; ++_i189) { - xfer += this->cols[_i187].read(iprot); + xfer += this->cols[_i189].read(iprot); } xfer += iprot->readListEnd(); } @@ -4626,14 +4852,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->bucketCols.clear(); - uint32_t _size188; - ::apache::thrift::protocol::TType _etype191; - xfer += iprot->readListBegin(_etype191, _size188); - this->bucketCols.resize(_size188); - uint32_t _i192; - for (_i192 = 0; _i192 < _size188; ++_i192) + uint32_t _size190; + ::apache::thrift::protocol::TType _etype193; + xfer += iprot->readListBegin(_etype193, _size190); + this->bucketCols.resize(_size190); + uint32_t _i194; + for (_i194 = 0; _i194 < _size190; ++_i194) { - xfer += iprot->readString(this->bucketCols[_i192]); + xfer += iprot->readString(this->bucketCols[_i194]); } xfer += iprot->readListEnd(); } @@ -4646,14 +4872,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->sortCols.clear(); - uint32_t _size193; - ::apache::thrift::protocol::TType _etype196; - xfer += iprot->readListBegin(_etype196, _size193); - this->sortCols.resize(_size193); - uint32_t _i197; - for (_i197 = 0; _i197 < _size193; ++_i197) + uint32_t _size195; + ::apache::thrift::protocol::TType _etype198; + xfer += 
iprot->readListBegin(_etype198, _size195); + this->sortCols.resize(_size195); + uint32_t _i199; + for (_i199 = 0; _i199 < _size195; ++_i199) { - xfer += this->sortCols[_i197].read(iprot); + xfer += this->sortCols[_i199].read(iprot); } xfer += iprot->readListEnd(); } @@ -4666,17 +4892,17 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size198; - ::apache::thrift::protocol::TType _ktype199; - ::apache::thrift::protocol::TType _vtype200; - xfer += iprot->readMapBegin(_ktype199, _vtype200, _size198); - uint32_t _i202; - for (_i202 = 0; _i202 < _size198; ++_i202) + uint32_t _size200; + ::apache::thrift::protocol::TType _ktype201; + ::apache::thrift::protocol::TType _vtype202; + xfer += iprot->readMapBegin(_ktype201, _vtype202, _size200); + uint32_t _i204; + for (_i204 = 0; _i204 < _size200; ++_i204) { - std::string _key203; - xfer += iprot->readString(_key203); - std::string& _val204 = this->parameters[_key203]; - xfer += iprot->readString(_val204); + std::string _key205; + xfer += iprot->readString(_key205); + std::string& _val206 = this->parameters[_key205]; + xfer += iprot->readString(_val206); } xfer += iprot->readMapEnd(); } @@ -4721,10 +4947,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->cols.size())); - std::vector ::const_iterator _iter205; - for (_iter205 = this->cols.begin(); _iter205 != this->cols.end(); ++_iter205) + std::vector ::const_iterator _iter207; + for (_iter207 = this->cols.begin(); _iter207 != this->cols.end(); ++_iter207) { - xfer += (*_iter205).write(oprot); + xfer += (*_iter207).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4757,10 +4983,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->bucketCols.size())); - std::vector ::const_iterator _iter206; - for (_iter206 = this->bucketCols.begin(); _iter206 != this->bucketCols.end(); ++_iter206) + std::vector ::const_iterator _iter208; + for (_iter208 = this->bucketCols.begin(); _iter208 != this->bucketCols.end(); ++_iter208) { - xfer += oprot->writeString((*_iter206)); + xfer += oprot->writeString((*_iter208)); } xfer += oprot->writeListEnd(); } @@ -4769,10 +4995,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->sortCols.size())); - std::vector ::const_iterator _iter207; - for (_iter207 = this->sortCols.begin(); _iter207 != this->sortCols.end(); ++_iter207) + std::vector ::const_iterator _iter209; + for (_iter209 = this->sortCols.begin(); _iter209 != this->sortCols.end(); ++_iter209) { - xfer += (*_iter207).write(oprot); + xfer += (*_iter209).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4781,11 +5007,11 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, 
static_cast(this->parameters.size())); - std::map ::const_iterator _iter208; - for (_iter208 = this->parameters.begin(); _iter208 != this->parameters.end(); ++_iter208) + std::map ::const_iterator _iter210; + for (_iter210 = this->parameters.begin(); _iter210 != this->parameters.end(); ++_iter210) { - xfer += oprot->writeString(_iter208->first); - xfer += oprot->writeString(_iter208->second); + xfer += oprot->writeString(_iter210->first); + xfer += oprot->writeString(_iter210->second); } xfer += oprot->writeMapEnd(); } @@ -4823,35 +5049,35 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) { swap(a.__isset, b.__isset); } -StorageDescriptor::StorageDescriptor(const StorageDescriptor& other209) { - cols = other209.cols; - location = other209.location; - inputFormat = other209.inputFormat; - outputFormat = other209.outputFormat; - compressed = other209.compressed; - numBuckets = other209.numBuckets; - serdeInfo = other209.serdeInfo; - bucketCols = other209.bucketCols; - sortCols = other209.sortCols; - parameters = other209.parameters; - skewedInfo = other209.skewedInfo; - storedAsSubDirectories = other209.storedAsSubDirectories; - __isset = other209.__isset; -} -StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other210) { - cols = other210.cols; - location = other210.location; - inputFormat = other210.inputFormat; - outputFormat = other210.outputFormat; - compressed = other210.compressed; - numBuckets = other210.numBuckets; - serdeInfo = other210.serdeInfo; - bucketCols = other210.bucketCols; - sortCols = other210.sortCols; - parameters = other210.parameters; - skewedInfo = other210.skewedInfo; - storedAsSubDirectories = other210.storedAsSubDirectories; - __isset = other210.__isset; +StorageDescriptor::StorageDescriptor(const StorageDescriptor& other211) { + cols = other211.cols; + location = other211.location; + inputFormat = other211.inputFormat; + outputFormat = other211.outputFormat; + compressed = other211.compressed; + numBuckets = other211.numBuckets; + serdeInfo = other211.serdeInfo; + bucketCols = other211.bucketCols; + sortCols = other211.sortCols; + parameters = other211.parameters; + skewedInfo = other211.skewedInfo; + storedAsSubDirectories = other211.storedAsSubDirectories; + __isset = other211.__isset; +} +StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other212) { + cols = other212.cols; + location = other212.location; + inputFormat = other212.inputFormat; + outputFormat = other212.outputFormat; + compressed = other212.compressed; + numBuckets = other212.numBuckets; + serdeInfo = other212.serdeInfo; + bucketCols = other212.bucketCols; + sortCols = other212.sortCols; + parameters = other212.parameters; + skewedInfo = other212.skewedInfo; + storedAsSubDirectories = other212.storedAsSubDirectories; + __isset = other212.__isset; return *this; } void StorageDescriptor::printTo(std::ostream& out) const { @@ -5026,14 +5252,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size211; - ::apache::thrift::protocol::TType _etype214; - xfer += iprot->readListBegin(_etype214, _size211); - this->partitionKeys.resize(_size211); - uint32_t _i215; - for (_i215 = 0; _i215 < _size211; ++_i215) + uint32_t _size213; + ::apache::thrift::protocol::TType _etype216; + xfer += iprot->readListBegin(_etype216, _size213); + this->partitionKeys.resize(_size213); + uint32_t _i217; + for (_i217 = 0; _i217 < _size213; ++_i217) { - xfer += 
this->partitionKeys[_i215].read(iprot); + xfer += this->partitionKeys[_i217].read(iprot); } xfer += iprot->readListEnd(); } @@ -5046,17 +5272,17 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size216; - ::apache::thrift::protocol::TType _ktype217; - ::apache::thrift::protocol::TType _vtype218; - xfer += iprot->readMapBegin(_ktype217, _vtype218, _size216); - uint32_t _i220; - for (_i220 = 0; _i220 < _size216; ++_i220) + uint32_t _size218; + ::apache::thrift::protocol::TType _ktype219; + ::apache::thrift::protocol::TType _vtype220; + xfer += iprot->readMapBegin(_ktype219, _vtype220, _size218); + uint32_t _i222; + for (_i222 = 0; _i222 < _size218; ++_i222) { - std::string _key221; - xfer += iprot->readString(_key221); - std::string& _val222 = this->parameters[_key221]; - xfer += iprot->readString(_val222); + std::string _key223; + xfer += iprot->readString(_key223); + std::string& _val224 = this->parameters[_key223]; + xfer += iprot->readString(_val224); } xfer += iprot->readMapEnd(); } @@ -5169,10 +5395,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter223; - for (_iter223 = this->partitionKeys.begin(); _iter223 != this->partitionKeys.end(); ++_iter223) + std::vector ::const_iterator _iter225; + for (_iter225 = this->partitionKeys.begin(); _iter225 != this->partitionKeys.end(); ++_iter225) { - xfer += (*_iter223).write(oprot); + xfer += (*_iter225).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5181,11 +5407,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter224; - for (_iter224 = this->parameters.begin(); _iter224 != this->parameters.end(); ++_iter224) + std::map ::const_iterator _iter226; + for (_iter226 = this->parameters.begin(); _iter226 != this->parameters.end(); ++_iter226) { - xfer += oprot->writeString(_iter224->first); - xfer += oprot->writeString(_iter224->second); + xfer += oprot->writeString(_iter226->first); + xfer += oprot->writeString(_iter226->second); } xfer += oprot->writeMapEnd(); } @@ -5249,43 +5475,43 @@ void swap(Table &a, Table &b) { swap(a.__isset, b.__isset); } -Table::Table(const Table& other225) { - tableName = other225.tableName; - dbName = other225.dbName; - owner = other225.owner; - createTime = other225.createTime; - lastAccessTime = other225.lastAccessTime; - retention = other225.retention; - sd = other225.sd; - partitionKeys = other225.partitionKeys; - parameters = other225.parameters; - viewOriginalText = other225.viewOriginalText; - viewExpandedText = other225.viewExpandedText; - tableType = other225.tableType; - privileges = other225.privileges; - temporary = other225.temporary; - rewriteEnabled = other225.rewriteEnabled; - creationMetadata = other225.creationMetadata; - __isset = other225.__isset; -} -Table& Table::operator=(const Table& other226) { - tableName = other226.tableName; - dbName = other226.dbName; - owner = other226.owner; - createTime = other226.createTime; - 
lastAccessTime = other226.lastAccessTime; - retention = other226.retention; - sd = other226.sd; - partitionKeys = other226.partitionKeys; - parameters = other226.parameters; - viewOriginalText = other226.viewOriginalText; - viewExpandedText = other226.viewExpandedText; - tableType = other226.tableType; - privileges = other226.privileges; - temporary = other226.temporary; - rewriteEnabled = other226.rewriteEnabled; - creationMetadata = other226.creationMetadata; - __isset = other226.__isset; +Table::Table(const Table& other227) { + tableName = other227.tableName; + dbName = other227.dbName; + owner = other227.owner; + createTime = other227.createTime; + lastAccessTime = other227.lastAccessTime; + retention = other227.retention; + sd = other227.sd; + partitionKeys = other227.partitionKeys; + parameters = other227.parameters; + viewOriginalText = other227.viewOriginalText; + viewExpandedText = other227.viewExpandedText; + tableType = other227.tableType; + privileges = other227.privileges; + temporary = other227.temporary; + rewriteEnabled = other227.rewriteEnabled; + creationMetadata = other227.creationMetadata; + __isset = other227.__isset; +} +Table& Table::operator=(const Table& other228) { + tableName = other228.tableName; + dbName = other228.dbName; + owner = other228.owner; + createTime = other228.createTime; + lastAccessTime = other228.lastAccessTime; + retention = other228.retention; + sd = other228.sd; + partitionKeys = other228.partitionKeys; + parameters = other228.parameters; + viewOriginalText = other228.viewOriginalText; + viewExpandedText = other228.viewExpandedText; + tableType = other228.tableType; + privileges = other228.privileges; + temporary = other228.temporary; + rewriteEnabled = other228.rewriteEnabled; + creationMetadata = other228.creationMetadata; + __isset = other228.__isset; return *this; } void Table::printTo(std::ostream& out) const { @@ -5373,14 +5599,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size227; - ::apache::thrift::protocol::TType _etype230; - xfer += iprot->readListBegin(_etype230, _size227); - this->values.resize(_size227); - uint32_t _i231; - for (_i231 = 0; _i231 < _size227; ++_i231) + uint32_t _size229; + ::apache::thrift::protocol::TType _etype232; + xfer += iprot->readListBegin(_etype232, _size229); + this->values.resize(_size229); + uint32_t _i233; + for (_i233 = 0; _i233 < _size229; ++_i233) { - xfer += iprot->readString(this->values[_i231]); + xfer += iprot->readString(this->values[_i233]); } xfer += iprot->readListEnd(); } @@ -5433,17 +5659,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size232; - ::apache::thrift::protocol::TType _ktype233; - ::apache::thrift::protocol::TType _vtype234; - xfer += iprot->readMapBegin(_ktype233, _vtype234, _size232); - uint32_t _i236; - for (_i236 = 0; _i236 < _size232; ++_i236) + uint32_t _size234; + ::apache::thrift::protocol::TType _ktype235; + ::apache::thrift::protocol::TType _vtype236; + xfer += iprot->readMapBegin(_ktype235, _vtype236, _size234); + uint32_t _i238; + for (_i238 = 0; _i238 < _size234; ++_i238) { - std::string _key237; - xfer += iprot->readString(_key237); - std::string& _val238 = this->parameters[_key237]; - xfer += iprot->readString(_val238); + std::string _key239; + xfer += iprot->readString(_key239); + std::string& _val240 = 
this->parameters[_key239]; + xfer += iprot->readString(_val240); } xfer += iprot->readMapEnd(); } @@ -5480,10 +5706,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter239; - for (_iter239 = this->values.begin(); _iter239 != this->values.end(); ++_iter239) + std::vector ::const_iterator _iter241; + for (_iter241 = this->values.begin(); _iter241 != this->values.end(); ++_iter241) { - xfer += oprot->writeString((*_iter239)); + xfer += oprot->writeString((*_iter241)); } xfer += oprot->writeListEnd(); } @@ -5512,11 +5738,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter240; - for (_iter240 = this->parameters.begin(); _iter240 != this->parameters.end(); ++_iter240) + std::map ::const_iterator _iter242; + for (_iter242 = this->parameters.begin(); _iter242 != this->parameters.end(); ++_iter242) { - xfer += oprot->writeString(_iter240->first); - xfer += oprot->writeString(_iter240->second); + xfer += oprot->writeString(_iter242->first); + xfer += oprot->writeString(_iter242->second); } xfer += oprot->writeMapEnd(); } @@ -5545,27 +5771,27 @@ void swap(Partition &a, Partition &b) { swap(a.__isset, b.__isset); } -Partition::Partition(const Partition& other241) { - values = other241.values; - dbName = other241.dbName; - tableName = other241.tableName; - createTime = other241.createTime; - lastAccessTime = other241.lastAccessTime; - sd = other241.sd; - parameters = other241.parameters; - privileges = other241.privileges; - __isset = other241.__isset; -} -Partition& Partition::operator=(const Partition& other242) { - values = other242.values; - dbName = other242.dbName; - tableName = other242.tableName; - createTime = other242.createTime; - lastAccessTime = other242.lastAccessTime; - sd = other242.sd; - parameters = other242.parameters; - privileges = other242.privileges; - __isset = other242.__isset; +Partition::Partition(const Partition& other243) { + values = other243.values; + dbName = other243.dbName; + tableName = other243.tableName; + createTime = other243.createTime; + lastAccessTime = other243.lastAccessTime; + sd = other243.sd; + parameters = other243.parameters; + privileges = other243.privileges; + __isset = other243.__isset; +} +Partition& Partition::operator=(const Partition& other244) { + values = other244.values; + dbName = other244.dbName; + tableName = other244.tableName; + createTime = other244.createTime; + lastAccessTime = other244.lastAccessTime; + sd = other244.sd; + parameters = other244.parameters; + privileges = other244.privileges; + __isset = other244.__isset; return *this; } void Partition::printTo(std::ostream& out) const { @@ -5637,14 +5863,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size243; - ::apache::thrift::protocol::TType _etype246; - xfer += iprot->readListBegin(_etype246, _size243); - this->values.resize(_size243); - uint32_t _i247; - for (_i247 = 0; _i247 < _size243; ++_i247) + uint32_t 
_size245; + ::apache::thrift::protocol::TType _etype248; + xfer += iprot->readListBegin(_etype248, _size245); + this->values.resize(_size245); + uint32_t _i249; + for (_i249 = 0; _i249 < _size245; ++_i249) { - xfer += iprot->readString(this->values[_i247]); + xfer += iprot->readString(this->values[_i249]); } xfer += iprot->readListEnd(); } @@ -5681,17 +5907,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size248; - ::apache::thrift::protocol::TType _ktype249; - ::apache::thrift::protocol::TType _vtype250; - xfer += iprot->readMapBegin(_ktype249, _vtype250, _size248); - uint32_t _i252; - for (_i252 = 0; _i252 < _size248; ++_i252) + uint32_t _size250; + ::apache::thrift::protocol::TType _ktype251; + ::apache::thrift::protocol::TType _vtype252; + xfer += iprot->readMapBegin(_ktype251, _vtype252, _size250); + uint32_t _i254; + for (_i254 = 0; _i254 < _size250; ++_i254) { - std::string _key253; - xfer += iprot->readString(_key253); - std::string& _val254 = this->parameters[_key253]; - xfer += iprot->readString(_val254); + std::string _key255; + xfer += iprot->readString(_key255); + std::string& _val256 = this->parameters[_key255]; + xfer += iprot->readString(_val256); } xfer += iprot->readMapEnd(); } @@ -5728,10 +5954,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter255; - for (_iter255 = this->values.begin(); _iter255 != this->values.end(); ++_iter255) + std::vector ::const_iterator _iter257; + for (_iter257 = this->values.begin(); _iter257 != this->values.end(); ++_iter257) { - xfer += oprot->writeString((*_iter255)); + xfer += oprot->writeString((*_iter257)); } xfer += oprot->writeListEnd(); } @@ -5752,11 +5978,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter256; - for (_iter256 = this->parameters.begin(); _iter256 != this->parameters.end(); ++_iter256) + std::map ::const_iterator _iter258; + for (_iter258 = this->parameters.begin(); _iter258 != this->parameters.end(); ++_iter258) { - xfer += oprot->writeString(_iter256->first); - xfer += oprot->writeString(_iter256->second); + xfer += oprot->writeString(_iter258->first); + xfer += oprot->writeString(_iter258->second); } xfer += oprot->writeMapEnd(); } @@ -5783,23 +6009,23 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) { swap(a.__isset, b.__isset); } -PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other257) { - values = other257.values; - createTime = other257.createTime; - lastAccessTime = other257.lastAccessTime; - relativePath = other257.relativePath; - parameters = other257.parameters; - privileges = other257.privileges; - __isset = other257.__isset; -} -PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other258) { - values = other258.values; - createTime = other258.createTime; - lastAccessTime = other258.lastAccessTime; - relativePath = other258.relativePath; - parameters = other258.parameters; - 
privileges = other258.privileges; - __isset = other258.__isset; +PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other259) { + values = other259.values; + createTime = other259.createTime; + lastAccessTime = other259.lastAccessTime; + relativePath = other259.relativePath; + parameters = other259.parameters; + privileges = other259.privileges; + __isset = other259.__isset; +} +PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other260) { + values = other260.values; + createTime = other260.createTime; + lastAccessTime = other260.lastAccessTime; + relativePath = other260.relativePath; + parameters = other260.parameters; + privileges = other260.privileges; + __isset = other260.__isset; return *this; } void PartitionWithoutSD::printTo(std::ostream& out) const { @@ -5852,14 +6078,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size259; - ::apache::thrift::protocol::TType _etype262; - xfer += iprot->readListBegin(_etype262, _size259); - this->partitions.resize(_size259); - uint32_t _i263; - for (_i263 = 0; _i263 < _size259; ++_i263) + uint32_t _size261; + ::apache::thrift::protocol::TType _etype264; + xfer += iprot->readListBegin(_etype264, _size261); + this->partitions.resize(_size261); + uint32_t _i265; + for (_i265 = 0; _i265 < _size261; ++_i265) { - xfer += this->partitions[_i263].read(iprot); + xfer += this->partitions[_i265].read(iprot); } xfer += iprot->readListEnd(); } @@ -5896,10 +6122,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter264; - for (_iter264 = this->partitions.begin(); _iter264 != this->partitions.end(); ++_iter264) + std::vector ::const_iterator _iter266; + for (_iter266 = this->partitions.begin(); _iter266 != this->partitions.end(); ++_iter266) { - xfer += (*_iter264).write(oprot); + xfer += (*_iter266).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5921,15 +6147,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) { swap(a.__isset, b.__isset); } -PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other265) { - partitions = other265.partitions; - sd = other265.sd; - __isset = other265.__isset; +PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other267) { + partitions = other267.partitions; + sd = other267.sd; + __isset = other267.__isset; } -PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other266) { - partitions = other266.partitions; - sd = other266.sd; - __isset = other266.__isset; +PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other268) { + partitions = other268.partitions; + sd = other268.sd; + __isset = other268.__isset; return *this; } void PartitionSpecWithSharedSD::printTo(std::ostream& out) const { @@ -5974,14 +6200,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size267; - ::apache::thrift::protocol::TType _etype270; - xfer += iprot->readListBegin(_etype270, _size267); - this->partitions.resize(_size267); 
- uint32_t _i271; - for (_i271 = 0; _i271 < _size267; ++_i271) + uint32_t _size269; + ::apache::thrift::protocol::TType _etype272; + xfer += iprot->readListBegin(_etype272, _size269); + this->partitions.resize(_size269); + uint32_t _i273; + for (_i273 = 0; _i273 < _size269; ++_i273) { - xfer += this->partitions[_i271].read(iprot); + xfer += this->partitions[_i273].read(iprot); } xfer += iprot->readListEnd(); } @@ -6010,10 +6236,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter272; - for (_iter272 = this->partitions.begin(); _iter272 != this->partitions.end(); ++_iter272) + std::vector ::const_iterator _iter274; + for (_iter274 = this->partitions.begin(); _iter274 != this->partitions.end(); ++_iter274) { - xfer += (*_iter272).write(oprot); + xfer += (*_iter274).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6030,13 +6256,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) { swap(a.__isset, b.__isset); } -PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other273) { - partitions = other273.partitions; - __isset = other273.__isset; +PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other275) { + partitions = other275.partitions; + __isset = other275.__isset; } -PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other274) { - partitions = other274.partitions; - __isset = other274.__isset; +PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other276) { + partitions = other276.partitions; + __isset = other276.__isset; return *this; } void PartitionListComposingSpec::printTo(std::ostream& out) const { @@ -6188,21 +6414,21 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.__isset, b.__isset); } -PartitionSpec::PartitionSpec(const PartitionSpec& other275) { - dbName = other275.dbName; - tableName = other275.tableName; - rootPath = other275.rootPath; - sharedSDPartitionSpec = other275.sharedSDPartitionSpec; - partitionList = other275.partitionList; - __isset = other275.__isset; -} -PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other276) { - dbName = other276.dbName; - tableName = other276.tableName; - rootPath = other276.rootPath; - sharedSDPartitionSpec = other276.sharedSDPartitionSpec; - partitionList = other276.partitionList; - __isset = other276.__isset; +PartitionSpec::PartitionSpec(const PartitionSpec& other277) { + dbName = other277.dbName; + tableName = other277.tableName; + rootPath = other277.rootPath; + sharedSDPartitionSpec = other277.sharedSDPartitionSpec; + partitionList = other277.partitionList; + __isset = other277.__isset; +} +PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other278) { + dbName = other278.dbName; + tableName = other278.tableName; + rootPath = other278.rootPath; + sharedSDPartitionSpec = other278.sharedSDPartitionSpec; + partitionList = other278.partitionList; + __isset = other278.__isset; return *this; } void PartitionSpec::printTo(std::ostream& out) const { @@ -6350,17 +6576,17 @@ uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size277; - 
::apache::thrift::protocol::TType _ktype278; - ::apache::thrift::protocol::TType _vtype279; - xfer += iprot->readMapBegin(_ktype278, _vtype279, _size277); - uint32_t _i281; - for (_i281 = 0; _i281 < _size277; ++_i281) + uint32_t _size279; + ::apache::thrift::protocol::TType _ktype280; + ::apache::thrift::protocol::TType _vtype281; + xfer += iprot->readMapBegin(_ktype280, _vtype281, _size279); + uint32_t _i283; + for (_i283 = 0; _i283 < _size279; ++_i283) { - std::string _key282; - xfer += iprot->readString(_key282); - std::string& _val283 = this->parameters[_key282]; - xfer += iprot->readString(_val283); + std::string _key284; + xfer += iprot->readString(_key284); + std::string& _val285 = this->parameters[_key284]; + xfer += iprot->readString(_val285); } xfer += iprot->readMapEnd(); } @@ -6429,11 +6655,11 @@ uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter284; - for (_iter284 = this->parameters.begin(); _iter284 != this->parameters.end(); ++_iter284) + std::map ::const_iterator _iter286; + for (_iter286 = this->parameters.begin(); _iter286 != this->parameters.end(); ++_iter286) { - xfer += oprot->writeString(_iter284->first); - xfer += oprot->writeString(_iter284->second); + xfer += oprot->writeString(_iter286->first); + xfer += oprot->writeString(_iter286->second); } xfer += oprot->writeMapEnd(); } @@ -6463,31 +6689,31 @@ void swap(Index &a, Index &b) { swap(a.__isset, b.__isset); } -Index::Index(const Index& other285) { - indexName = other285.indexName; - indexHandlerClass = other285.indexHandlerClass; - dbName = other285.dbName; - origTableName = other285.origTableName; - createTime = other285.createTime; - lastAccessTime = other285.lastAccessTime; - indexTableName = other285.indexTableName; - sd = other285.sd; - parameters = other285.parameters; - deferredRebuild = other285.deferredRebuild; - __isset = other285.__isset; -} -Index& Index::operator=(const Index& other286) { - indexName = other286.indexName; - indexHandlerClass = other286.indexHandlerClass; - dbName = other286.dbName; - origTableName = other286.origTableName; - createTime = other286.createTime; - lastAccessTime = other286.lastAccessTime; - indexTableName = other286.indexTableName; - sd = other286.sd; - parameters = other286.parameters; - deferredRebuild = other286.deferredRebuild; - __isset = other286.__isset; +Index::Index(const Index& other287) { + indexName = other287.indexName; + indexHandlerClass = other287.indexHandlerClass; + dbName = other287.dbName; + origTableName = other287.origTableName; + createTime = other287.createTime; + lastAccessTime = other287.lastAccessTime; + indexTableName = other287.indexTableName; + sd = other287.sd; + parameters = other287.parameters; + deferredRebuild = other287.deferredRebuild; + __isset = other287.__isset; +} +Index& Index::operator=(const Index& other288) { + indexName = other288.indexName; + indexHandlerClass = other288.indexHandlerClass; + dbName = other288.dbName; + origTableName = other288.origTableName; + createTime = other288.createTime; + lastAccessTime = other288.lastAccessTime; + indexTableName = other288.indexTableName; + sd = other288.sd; + parameters = other288.parameters; + deferredRebuild = other288.deferredRebuild; + __isset = other288.__isset; return *this; } void 
Index::printTo(std::ostream& out) const { @@ -6638,19 +6864,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) { swap(a.__isset, b.__isset); } -BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other287) { - numTrues = other287.numTrues; - numFalses = other287.numFalses; - numNulls = other287.numNulls; - bitVectors = other287.bitVectors; - __isset = other287.__isset; +BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other289) { + numTrues = other289.numTrues; + numFalses = other289.numFalses; + numNulls = other289.numNulls; + bitVectors = other289.bitVectors; + __isset = other289.__isset; } -BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other288) { - numTrues = other288.numTrues; - numFalses = other288.numFalses; - numNulls = other288.numNulls; - bitVectors = other288.bitVectors; - __isset = other288.__isset; +BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other290) { + numTrues = other290.numTrues; + numFalses = other290.numFalses; + numNulls = other290.numNulls; + bitVectors = other290.bitVectors; + __isset = other290.__isset; return *this; } void BooleanColumnStatsData::printTo(std::ostream& out) const { @@ -6813,21 +7039,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) { swap(a.__isset, b.__isset); } -DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other289) { - lowValue = other289.lowValue; - highValue = other289.highValue; - numNulls = other289.numNulls; - numDVs = other289.numDVs; - bitVectors = other289.bitVectors; - __isset = other289.__isset; -} -DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other290) { - lowValue = other290.lowValue; - highValue = other290.highValue; - numNulls = other290.numNulls; - numDVs = other290.numDVs; - bitVectors = other290.bitVectors; - __isset = other290.__isset; +DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other291) { + lowValue = other291.lowValue; + highValue = other291.highValue; + numNulls = other291.numNulls; + numDVs = other291.numDVs; + bitVectors = other291.bitVectors; + __isset = other291.__isset; +} +DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other292) { + lowValue = other292.lowValue; + highValue = other292.highValue; + numNulls = other292.numNulls; + numDVs = other292.numDVs; + bitVectors = other292.bitVectors; + __isset = other292.__isset; return *this; } void DoubleColumnStatsData::printTo(std::ostream& out) const { @@ -6991,21 +7217,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) { swap(a.__isset, b.__isset); } -LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other291) { - lowValue = other291.lowValue; - highValue = other291.highValue; - numNulls = other291.numNulls; - numDVs = other291.numDVs; - bitVectors = other291.bitVectors; - __isset = other291.__isset; +LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other293) { + lowValue = other293.lowValue; + highValue = other293.highValue; + numNulls = other293.numNulls; + numDVs = other293.numDVs; + bitVectors = other293.bitVectors; + __isset = other293.__isset; } -LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other292) { - lowValue = other292.lowValue; - highValue = other292.highValue; - numNulls = other292.numNulls; - numDVs = other292.numDVs; - bitVectors = other292.bitVectors; - __isset = 
other292.__isset; +LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other294) { + lowValue = other294.lowValue; + highValue = other294.highValue; + numNulls = other294.numNulls; + numDVs = other294.numDVs; + bitVectors = other294.bitVectors; + __isset = other294.__isset; return *this; } void LongColumnStatsData::printTo(std::ostream& out) const { @@ -7171,21 +7397,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) { swap(a.__isset, b.__isset); } -StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other293) { - maxColLen = other293.maxColLen; - avgColLen = other293.avgColLen; - numNulls = other293.numNulls; - numDVs = other293.numDVs; - bitVectors = other293.bitVectors; - __isset = other293.__isset; +StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other295) { + maxColLen = other295.maxColLen; + avgColLen = other295.avgColLen; + numNulls = other295.numNulls; + numDVs = other295.numDVs; + bitVectors = other295.bitVectors; + __isset = other295.__isset; } -StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other294) { - maxColLen = other294.maxColLen; - avgColLen = other294.avgColLen; - numNulls = other294.numNulls; - numDVs = other294.numDVs; - bitVectors = other294.bitVectors; - __isset = other294.__isset; +StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other296) { + maxColLen = other296.maxColLen; + avgColLen = other296.avgColLen; + numNulls = other296.numNulls; + numDVs = other296.numDVs; + bitVectors = other296.bitVectors; + __isset = other296.__isset; return *this; } void StringColumnStatsData::printTo(std::ostream& out) const { @@ -7331,19 +7557,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) { swap(a.__isset, b.__isset); } -BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other295) { - maxColLen = other295.maxColLen; - avgColLen = other295.avgColLen; - numNulls = other295.numNulls; - bitVectors = other295.bitVectors; - __isset = other295.__isset; +BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other297) { + maxColLen = other297.maxColLen; + avgColLen = other297.avgColLen; + numNulls = other297.numNulls; + bitVectors = other297.bitVectors; + __isset = other297.__isset; } -BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other296) { - maxColLen = other296.maxColLen; - avgColLen = other296.avgColLen; - numNulls = other296.numNulls; - bitVectors = other296.bitVectors; - __isset = other296.__isset; +BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other298) { + maxColLen = other298.maxColLen; + avgColLen = other298.avgColLen; + numNulls = other298.numNulls; + bitVectors = other298.bitVectors; + __isset = other298.__isset; return *this; } void BinaryColumnStatsData::printTo(std::ostream& out) const { @@ -7448,13 +7674,13 @@ void swap(Decimal &a, Decimal &b) { swap(a.scale, b.scale); } -Decimal::Decimal(const Decimal& other297) { - unscaled = other297.unscaled; - scale = other297.scale; +Decimal::Decimal(const Decimal& other299) { + unscaled = other299.unscaled; + scale = other299.scale; } -Decimal& Decimal::operator=(const Decimal& other298) { - unscaled = other298.unscaled; - scale = other298.scale; +Decimal& Decimal::operator=(const Decimal& other300) { + unscaled = other300.unscaled; + scale = other300.scale; return *this; } void Decimal::printTo(std::ostream& out) const { @@ 
-7615,21 +7841,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) { swap(a.__isset, b.__isset); } -DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other299) { - lowValue = other299.lowValue; - highValue = other299.highValue; - numNulls = other299.numNulls; - numDVs = other299.numDVs; - bitVectors = other299.bitVectors; - __isset = other299.__isset; -} -DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other300) { - lowValue = other300.lowValue; - highValue = other300.highValue; - numNulls = other300.numNulls; - numDVs = other300.numDVs; - bitVectors = other300.bitVectors; - __isset = other300.__isset; +DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other301) { + lowValue = other301.lowValue; + highValue = other301.highValue; + numNulls = other301.numNulls; + numDVs = other301.numDVs; + bitVectors = other301.bitVectors; + __isset = other301.__isset; +} +DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other302) { + lowValue = other302.lowValue; + highValue = other302.highValue; + numNulls = other302.numNulls; + numDVs = other302.numDVs; + bitVectors = other302.bitVectors; + __isset = other302.__isset; return *this; } void DecimalColumnStatsData::printTo(std::ostream& out) const { @@ -7715,11 +7941,11 @@ void swap(Date &a, Date &b) { swap(a.daysSinceEpoch, b.daysSinceEpoch); } -Date::Date(const Date& other301) { - daysSinceEpoch = other301.daysSinceEpoch; +Date::Date(const Date& other303) { + daysSinceEpoch = other303.daysSinceEpoch; } -Date& Date::operator=(const Date& other302) { - daysSinceEpoch = other302.daysSinceEpoch; +Date& Date::operator=(const Date& other304) { + daysSinceEpoch = other304.daysSinceEpoch; return *this; } void Date::printTo(std::ostream& out) const { @@ -7879,21 +8105,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) { swap(a.__isset, b.__isset); } -DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other303) { - lowValue = other303.lowValue; - highValue = other303.highValue; - numNulls = other303.numNulls; - numDVs = other303.numDVs; - bitVectors = other303.bitVectors; - __isset = other303.__isset; -} -DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other304) { - lowValue = other304.lowValue; - highValue = other304.highValue; - numNulls = other304.numNulls; - numDVs = other304.numDVs; - bitVectors = other304.bitVectors; - __isset = other304.__isset; +DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other305) { + lowValue = other305.lowValue; + highValue = other305.highValue; + numNulls = other305.numNulls; + numDVs = other305.numDVs; + bitVectors = other305.bitVectors; + __isset = other305.__isset; +} +DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other306) { + lowValue = other306.lowValue; + highValue = other306.highValue; + numNulls = other306.numNulls; + numDVs = other306.numDVs; + bitVectors = other306.bitVectors; + __isset = other306.__isset; return *this; } void DateColumnStatsData::printTo(std::ostream& out) const { @@ -8079,25 +8305,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other305) { - booleanStats = other305.booleanStats; - longStats = other305.longStats; - doubleStats = other305.doubleStats; - stringStats = other305.stringStats; - binaryStats = 
other305.binaryStats; - decimalStats = other305.decimalStats; - dateStats = other305.dateStats; - __isset = other305.__isset; -} -ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other306) { - booleanStats = other306.booleanStats; - longStats = other306.longStats; - doubleStats = other306.doubleStats; - stringStats = other306.stringStats; - binaryStats = other306.binaryStats; - decimalStats = other306.decimalStats; - dateStats = other306.dateStats; - __isset = other306.__isset; +ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other307) { + booleanStats = other307.booleanStats; + longStats = other307.longStats; + doubleStats = other307.doubleStats; + stringStats = other307.stringStats; + binaryStats = other307.binaryStats; + decimalStats = other307.decimalStats; + dateStats = other307.dateStats; + __isset = other307.__isset; +} +ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other308) { + booleanStats = other308.booleanStats; + longStats = other308.longStats; + doubleStats = other308.doubleStats; + stringStats = other308.stringStats; + binaryStats = other308.binaryStats; + decimalStats = other308.decimalStats; + dateStats = other308.dateStats; + __isset = other308.__isset; return *this; } void ColumnStatisticsData::printTo(std::ostream& out) const { @@ -8225,15 +8451,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) { swap(a.statsData, b.statsData); } -ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other307) { - colName = other307.colName; - colType = other307.colType; - statsData = other307.statsData; +ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other309) { + colName = other309.colName; + colType = other309.colType; + statsData = other309.statsData; } -ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other308) { - colName = other308.colName; - colType = other308.colType; - statsData = other308.statsData; +ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other310) { + colName = other310.colName; + colType = other310.colType; + statsData = other310.statsData; return *this; } void ColumnStatisticsObj::printTo(std::ostream& out) const { @@ -8396,21 +8622,21 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other309) { - isTblLevel = other309.isTblLevel; - dbName = other309.dbName; - tableName = other309.tableName; - partName = other309.partName; - lastAnalyzed = other309.lastAnalyzed; - __isset = other309.__isset; -} -ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other310) { - isTblLevel = other310.isTblLevel; - dbName = other310.dbName; - tableName = other310.tableName; - partName = other310.partName; - lastAnalyzed = other310.lastAnalyzed; - __isset = other310.__isset; +ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other311) { + isTblLevel = other311.isTblLevel; + dbName = other311.dbName; + tableName = other311.tableName; + partName = other311.partName; + lastAnalyzed = other311.lastAnalyzed; + __isset = other311.__isset; +} +ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other312) { + isTblLevel = other312.isTblLevel; + dbName = other312.dbName; + tableName = other312.tableName; + partName = other312.partName; + lastAnalyzed = other312.lastAnalyzed; + __isset = 
other312.__isset; return *this; } void ColumnStatisticsDesc::printTo(std::ostream& out) const { @@ -8472,14 +8698,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->statsObj.clear(); - uint32_t _size311; - ::apache::thrift::protocol::TType _etype314; - xfer += iprot->readListBegin(_etype314, _size311); - this->statsObj.resize(_size311); - uint32_t _i315; - for (_i315 = 0; _i315 < _size311; ++_i315) + uint32_t _size313; + ::apache::thrift::protocol::TType _etype316; + xfer += iprot->readListBegin(_etype316, _size313); + this->statsObj.resize(_size313); + uint32_t _i317; + for (_i317 = 0; _i317 < _size313; ++_i317) { - xfer += this->statsObj[_i315].read(iprot); + xfer += this->statsObj[_i317].read(iprot); } xfer += iprot->readListEnd(); } @@ -8516,10 +8742,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size())); - std::vector<ColumnStatisticsObj> ::const_iterator _iter316; - for (_iter316 = this->statsObj.begin(); _iter316 != this->statsObj.end(); ++_iter316) + std::vector<ColumnStatisticsObj> ::const_iterator _iter318; + for (_iter318 = this->statsObj.begin(); _iter318 != this->statsObj.end(); ++_iter318) { - xfer += (*_iter316).write(oprot); + xfer += (*_iter318).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8536,13 +8762,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { swap(a.statsObj, b.statsObj); } -ColumnStatistics::ColumnStatistics(const ColumnStatistics& other317) { - statsDesc = other317.statsDesc; - statsObj = other317.statsObj; +ColumnStatistics::ColumnStatistics(const ColumnStatistics& other319) { + statsDesc = other319.statsDesc; + statsObj = other319.statsObj; } -ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other318) { - statsDesc = other318.statsDesc; - statsObj = other318.statsObj; +ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other320) { + statsDesc = other320.statsDesc; + statsObj = other320.statsObj; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -8593,14 +8819,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size319; - ::apache::thrift::protocol::TType _etype322; - xfer += iprot->readListBegin(_etype322, _size319); - this->colStats.resize(_size319); - uint32_t _i323; - for (_i323 = 0; _i323 < _size319; ++_i323) + uint32_t _size321; + ::apache::thrift::protocol::TType _etype324; + xfer += iprot->readListBegin(_etype324, _size321); + this->colStats.resize(_size321); + uint32_t _i325; + for (_i325 = 0; _i325 < _size321; ++_i325) { - xfer += this->colStats[_i323].read(iprot); + xfer += this->colStats[_i325].read(iprot); } xfer += iprot->readListEnd(); } @@ -8641,10 +8867,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size())); - std::vector<ColumnStatistics> ::const_iterator _iter324; - for (_iter324 = this->colStats.begin(); _iter324 != this->colStats.end(); ++_iter324) + std::vector<ColumnStatistics> ::const_iterator _iter326; + for (_iter326 = this->colStats.begin(); _iter326 != this->colStats.end(); ++_iter326) {
- xfer += (*_iter324).write(oprot); + xfer += (*_iter326).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8665,13 +8891,13 @@ void swap(AggrStats &a, AggrStats &b) { swap(a.partsFound, b.partsFound); } -AggrStats::AggrStats(const AggrStats& other325) { - colStats = other325.colStats; - partsFound = other325.partsFound; +AggrStats::AggrStats(const AggrStats& other327) { + colStats = other327.colStats; + partsFound = other327.partsFound; } -AggrStats& AggrStats::operator=(const AggrStats& other326) { - colStats = other326.colStats; - partsFound = other326.partsFound; +AggrStats& AggrStats::operator=(const AggrStats& other328) { + colStats = other328.colStats; + partsFound = other328.partsFound; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -8722,14 +8948,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size327; - ::apache::thrift::protocol::TType _etype330; - xfer += iprot->readListBegin(_etype330, _size327); - this->colStats.resize(_size327); - uint32_t _i331; - for (_i331 = 0; _i331 < _size327; ++_i331) + uint32_t _size329; + ::apache::thrift::protocol::TType _etype332; + xfer += iprot->readListBegin(_etype332, _size329); + this->colStats.resize(_size329); + uint32_t _i333; + for (_i333 = 0; _i333 < _size329; ++_i333) { - xfer += this->colStats[_i331].read(iprot); + xfer += this->colStats[_i333].read(iprot); } xfer += iprot->readListEnd(); } @@ -8768,10 +8994,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size())); - std::vector<ColumnStatistics> ::const_iterator _iter332; - for (_iter332 = this->colStats.begin(); _iter332 != this->colStats.end(); ++_iter332) + std::vector<ColumnStatistics> ::const_iterator _iter334; + for (_iter334 = this->colStats.begin(); _iter334 != this->colStats.end(); ++_iter334) { - xfer += (*_iter332).write(oprot); + xfer += (*_iter334).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8794,15 +9020,15 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { swap(a.__isset, b.__isset); } -SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other333) { - colStats = other333.colStats; - needMerge = other333.needMerge; - __isset = other333.__isset; +SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other335) { + colStats = other335.colStats; + needMerge = other335.needMerge; + __isset = other335.__isset; } -SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other334) { - colStats = other334.colStats; - needMerge = other334.needMerge; - __isset = other334.__isset; +SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other336) { + colStats = other336.colStats; + needMerge = other336.needMerge; + __isset = other336.__isset; return *this; } void SetPartitionsStatsRequest::printTo(std::ostream& out) const { @@ -8851,14 +9077,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size335; - ::apache::thrift::protocol::TType _etype338; - xfer += iprot->readListBegin(_etype338, _size335); - this->fieldSchemas.resize(_size335); - uint32_t _i339; - for 
(_i339 = 0; _i339 < _size335; ++_i339) + uint32_t _size337; + ::apache::thrift::protocol::TType _etype340; + xfer += iprot->readListBegin(_etype340, _size337); + this->fieldSchemas.resize(_size337); + uint32_t _i341; + for (_i341 = 0; _i341 < _size337; ++_i341) { - xfer += this->fieldSchemas[_i339].read(iprot); + xfer += this->fieldSchemas[_i341].read(iprot); } xfer += iprot->readListEnd(); } @@ -8871,17 +9097,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size340; - ::apache::thrift::protocol::TType _ktype341; - ::apache::thrift::protocol::TType _vtype342; - xfer += iprot->readMapBegin(_ktype341, _vtype342, _size340); - uint32_t _i344; - for (_i344 = 0; _i344 < _size340; ++_i344) + uint32_t _size342; + ::apache::thrift::protocol::TType _ktype343; + ::apache::thrift::protocol::TType _vtype344; + xfer += iprot->readMapBegin(_ktype343, _vtype344, _size342); + uint32_t _i346; + for (_i346 = 0; _i346 < _size342; ++_i346) { - std::string _key345; - xfer += iprot->readString(_key345); - std::string& _val346 = this->properties[_key345]; - xfer += iprot->readString(_val346); + std::string _key347; + xfer += iprot->readString(_key347); + std::string& _val348 = this->properties[_key347]; + xfer += iprot->readString(_val348); } xfer += iprot->readMapEnd(); } @@ -8910,10 +9136,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter347; - for (_iter347 = this->fieldSchemas.begin(); _iter347 != this->fieldSchemas.end(); ++_iter347) + std::vector ::const_iterator _iter349; + for (_iter349 = this->fieldSchemas.begin(); _iter349 != this->fieldSchemas.end(); ++_iter349) { - xfer += (*_iter347).write(oprot); + xfer += (*_iter349).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8922,11 +9148,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter348; - for (_iter348 = this->properties.begin(); _iter348 != this->properties.end(); ++_iter348) + std::map ::const_iterator _iter350; + for (_iter350 = this->properties.begin(); _iter350 != this->properties.end(); ++_iter350) { - xfer += oprot->writeString(_iter348->first); - xfer += oprot->writeString(_iter348->second); + xfer += oprot->writeString(_iter350->first); + xfer += oprot->writeString(_iter350->second); } xfer += oprot->writeMapEnd(); } @@ -8944,15 +9170,15 @@ void swap(Schema &a, Schema &b) { swap(a.__isset, b.__isset); } -Schema::Schema(const Schema& other349) { - fieldSchemas = other349.fieldSchemas; - properties = other349.properties; - __isset = other349.__isset; +Schema::Schema(const Schema& other351) { + fieldSchemas = other351.fieldSchemas; + properties = other351.properties; + __isset = other351.__isset; } -Schema& Schema::operator=(const Schema& other350) { - fieldSchemas = other350.fieldSchemas; - properties = other350.properties; - __isset = other350.__isset; +Schema& Schema::operator=(const Schema& other352) { + fieldSchemas = other352.fieldSchemas; + properties = 
other352.properties; + __isset = other352.__isset; return *this; } void Schema::printTo(std::ostream& out) const { @@ -8997,17 +9223,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size351; - ::apache::thrift::protocol::TType _ktype352; - ::apache::thrift::protocol::TType _vtype353; - xfer += iprot->readMapBegin(_ktype352, _vtype353, _size351); - uint32_t _i355; - for (_i355 = 0; _i355 < _size351; ++_i355) + uint32_t _size353; + ::apache::thrift::protocol::TType _ktype354; + ::apache::thrift::protocol::TType _vtype355; + xfer += iprot->readMapBegin(_ktype354, _vtype355, _size353); + uint32_t _i357; + for (_i357 = 0; _i357 < _size353; ++_i357) { - std::string _key356; - xfer += iprot->readString(_key356); - std::string& _val357 = this->properties[_key356]; - xfer += iprot->readString(_val357); + std::string _key358; + xfer += iprot->readString(_key358); + std::string& _val359 = this->properties[_key358]; + xfer += iprot->readString(_val359); } xfer += iprot->readMapEnd(); } @@ -9036,11 +9262,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter358; - for (_iter358 = this->properties.begin(); _iter358 != this->properties.end(); ++_iter358) + std::map ::const_iterator _iter360; + for (_iter360 = this->properties.begin(); _iter360 != this->properties.end(); ++_iter360) { - xfer += oprot->writeString(_iter358->first); - xfer += oprot->writeString(_iter358->second); + xfer += oprot->writeString(_iter360->first); + xfer += oprot->writeString(_iter360->second); } xfer += oprot->writeMapEnd(); } @@ -9057,13 +9283,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) { swap(a.__isset, b.__isset); } -EnvironmentContext::EnvironmentContext(const EnvironmentContext& other359) { - properties = other359.properties; - __isset = other359.__isset; +EnvironmentContext::EnvironmentContext(const EnvironmentContext& other361) { + properties = other361.properties; + __isset = other361.__isset; } -EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other360) { - properties = other360.properties; - __isset = other360.__isset; +EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other362) { + properties = other362.properties; + __isset = other362.__isset; return *this; } void EnvironmentContext::printTo(std::ostream& out) const { @@ -9165,13 +9391,13 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) { swap(a.tbl_name, b.tbl_name); } -PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other361) { - db_name = other361.db_name; - tbl_name = other361.tbl_name; +PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other363) { + db_name = other363.db_name; + tbl_name = other363.tbl_name; } -PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other362) { - db_name = other362.db_name; - tbl_name = other362.tbl_name; +PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other364) { + db_name = other364.db_name; + tbl_name = other364.tbl_name; return *this; } void PrimaryKeysRequest::printTo(std::ostream& out) const { @@ -9217,14 +9443,14 @@ uint32_t 
PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size363; - ::apache::thrift::protocol::TType _etype366; - xfer += iprot->readListBegin(_etype366, _size363); - this->primaryKeys.resize(_size363); - uint32_t _i367; - for (_i367 = 0; _i367 < _size363; ++_i367) + uint32_t _size365; + ::apache::thrift::protocol::TType _etype368; + xfer += iprot->readListBegin(_etype368, _size365); + this->primaryKeys.resize(_size365); + uint32_t _i369; + for (_i369 = 0; _i369 < _size365; ++_i369) { - xfer += this->primaryKeys[_i367].read(iprot); + xfer += this->primaryKeys[_i369].read(iprot); } xfer += iprot->readListEnd(); } @@ -9255,10 +9481,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter368; - for (_iter368 = this->primaryKeys.begin(); _iter368 != this->primaryKeys.end(); ++_iter368) + std::vector ::const_iterator _iter370; + for (_iter370 = this->primaryKeys.begin(); _iter370 != this->primaryKeys.end(); ++_iter370) { - xfer += (*_iter368).write(oprot); + xfer += (*_iter370).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9274,11 +9500,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) { swap(a.primaryKeys, b.primaryKeys); } -PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other369) { - primaryKeys = other369.primaryKeys; +PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other371) { + primaryKeys = other371.primaryKeys; } -PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other370) { - primaryKeys = other370.primaryKeys; +PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other372) { + primaryKeys = other372.primaryKeys; return *this; } void PrimaryKeysResponse::printTo(std::ostream& out) const { @@ -9409,19 +9635,19 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) { swap(a.__isset, b.__isset); } -ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other371) { - parent_db_name = other371.parent_db_name; - parent_tbl_name = other371.parent_tbl_name; - foreign_db_name = other371.foreign_db_name; - foreign_tbl_name = other371.foreign_tbl_name; - __isset = other371.__isset; +ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other373) { + parent_db_name = other373.parent_db_name; + parent_tbl_name = other373.parent_tbl_name; + foreign_db_name = other373.foreign_db_name; + foreign_tbl_name = other373.foreign_tbl_name; + __isset = other373.__isset; } -ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other372) { - parent_db_name = other372.parent_db_name; - parent_tbl_name = other372.parent_tbl_name; - foreign_db_name = other372.foreign_db_name; - foreign_tbl_name = other372.foreign_tbl_name; - __isset = other372.__isset; +ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other374) { + parent_db_name = other374.parent_db_name; + parent_tbl_name = other374.parent_tbl_name; + foreign_db_name = other374.foreign_db_name; + foreign_tbl_name = other374.foreign_tbl_name; + __isset = other374.__isset; return *this; } void ForeignKeysRequest::printTo(std::ostream& out) const { @@ -9469,14 +9695,14 @@ uint32_t 
ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size373; - ::apache::thrift::protocol::TType _etype376; - xfer += iprot->readListBegin(_etype376, _size373); - this->foreignKeys.resize(_size373); - uint32_t _i377; - for (_i377 = 0; _i377 < _size373; ++_i377) + uint32_t _size375; + ::apache::thrift::protocol::TType _etype378; + xfer += iprot->readListBegin(_etype378, _size375); + this->foreignKeys.resize(_size375); + uint32_t _i379; + for (_i379 = 0; _i379 < _size375; ++_i379) { - xfer += this->foreignKeys[_i377].read(iprot); + xfer += this->foreignKeys[_i379].read(iprot); } xfer += iprot->readListEnd(); } @@ -9507,10 +9733,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter378; - for (_iter378 = this->foreignKeys.begin(); _iter378 != this->foreignKeys.end(); ++_iter378) + std::vector ::const_iterator _iter380; + for (_iter380 = this->foreignKeys.begin(); _iter380 != this->foreignKeys.end(); ++_iter380) { - xfer += (*_iter378).write(oprot); + xfer += (*_iter380).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9526,11 +9752,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) { swap(a.foreignKeys, b.foreignKeys); } -ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other379) { - foreignKeys = other379.foreignKeys; +ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other381) { + foreignKeys = other381.foreignKeys; } -ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other380) { - foreignKeys = other380.foreignKeys; +ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other382) { + foreignKeys = other382.foreignKeys; return *this; } void ForeignKeysResponse::printTo(std::ostream& out) const { @@ -9632,13 +9858,13 @@ void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) { swap(a.tbl_name, b.tbl_name); } -UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other381) { - db_name = other381.db_name; - tbl_name = other381.tbl_name; +UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other383) { + db_name = other383.db_name; + tbl_name = other383.tbl_name; } -UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other382) { - db_name = other382.db_name; - tbl_name = other382.tbl_name; +UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other384) { + db_name = other384.db_name; + tbl_name = other384.tbl_name; return *this; } void UniqueConstraintsRequest::printTo(std::ostream& out) const { @@ -9684,14 +9910,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size383; - ::apache::thrift::protocol::TType _etype386; - xfer += iprot->readListBegin(_etype386, _size383); - this->uniqueConstraints.resize(_size383); - uint32_t _i387; - for (_i387 = 0; _i387 < _size383; ++_i387) + uint32_t _size385; + ::apache::thrift::protocol::TType _etype388; + xfer += iprot->readListBegin(_etype388, _size385); + this->uniqueConstraints.resize(_size385); 
+ uint32_t _i389; + for (_i389 = 0; _i389 < _size385; ++_i389) { - xfer += this->uniqueConstraints[_i387].read(iprot); + xfer += this->uniqueConstraints[_i389].read(iprot); } xfer += iprot->readListEnd(); } @@ -9722,10 +9948,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter388; - for (_iter388 = this->uniqueConstraints.begin(); _iter388 != this->uniqueConstraints.end(); ++_iter388) + std::vector ::const_iterator _iter390; + for (_iter390 = this->uniqueConstraints.begin(); _iter390 != this->uniqueConstraints.end(); ++_iter390) { - xfer += (*_iter388).write(oprot); + xfer += (*_iter390).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9741,11 +9967,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) { swap(a.uniqueConstraints, b.uniqueConstraints); } -UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other389) { - uniqueConstraints = other389.uniqueConstraints; +UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other391) { + uniqueConstraints = other391.uniqueConstraints; } -UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other390) { - uniqueConstraints = other390.uniqueConstraints; +UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other392) { + uniqueConstraints = other392.uniqueConstraints; return *this; } void UniqueConstraintsResponse::printTo(std::ostream& out) const { @@ -9847,13 +10073,13 @@ void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) { swap(a.tbl_name, b.tbl_name); } -NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other391) { - db_name = other391.db_name; - tbl_name = other391.tbl_name; +NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other393) { + db_name = other393.db_name; + tbl_name = other393.tbl_name; } -NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other392) { - db_name = other392.db_name; - tbl_name = other392.tbl_name; +NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other394) { + db_name = other394.db_name; + tbl_name = other394.tbl_name; return *this; } void NotNullConstraintsRequest::printTo(std::ostream& out) const { @@ -9899,14 +10125,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size393; - ::apache::thrift::protocol::TType _etype396; - xfer += iprot->readListBegin(_etype396, _size393); - this->notNullConstraints.resize(_size393); - uint32_t _i397; - for (_i397 = 0; _i397 < _size393; ++_i397) + uint32_t _size395; + ::apache::thrift::protocol::TType _etype398; + xfer += iprot->readListBegin(_etype398, _size395); + this->notNullConstraints.resize(_size395); + uint32_t _i399; + for (_i399 = 0; _i399 < _size395; ++_i399) { - xfer += this->notNullConstraints[_i397].read(iprot); + xfer += this->notNullConstraints[_i399].read(iprot); } xfer += iprot->readListEnd(); } @@ -9937,10 +10163,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol 
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter398; - for (_iter398 = this->notNullConstraints.begin(); _iter398 != this->notNullConstraints.end(); ++_iter398) + std::vector ::const_iterator _iter400; + for (_iter400 = this->notNullConstraints.begin(); _iter400 != this->notNullConstraints.end(); ++_iter400) { - xfer += (*_iter398).write(oprot); + xfer += (*_iter400).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9956,11 +10182,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) { swap(a.notNullConstraints, b.notNullConstraints); } -NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other399) { - notNullConstraints = other399.notNullConstraints; +NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other401) { + notNullConstraints = other401.notNullConstraints; } -NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other400) { - notNullConstraints = other400.notNullConstraints; +NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other402) { + notNullConstraints = other402.notNullConstraints; return *this; } void NotNullConstraintsResponse::printTo(std::ostream& out) const { @@ -9971,6 +10197,221 @@ void NotNullConstraintsResponse::printTo(std::ostream& out) const { } +DefaultConstraintsRequest::~DefaultConstraintsRequest() throw() { +} + + +void DefaultConstraintsRequest::__set_db_name(const std::string& val) { + this->db_name = val; +} + +void DefaultConstraintsRequest::__set_tbl_name(const std::string& val) { + this->tbl_name = val; +} + +uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_db_name = false; + bool isset_tbl_name = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + isset_db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + isset_tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_db_name) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tbl_name) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t DefaultConstraintsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("DefaultConstraintsRequest"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += 
oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(DefaultConstraintsRequest &a, DefaultConstraintsRequest &b) { + using ::std::swap; + swap(a.db_name, b.db_name); + swap(a.tbl_name, b.tbl_name); +} + +DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other403) { + db_name = other403.db_name; + tbl_name = other403.tbl_name; +} +DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other404) { + db_name = other404.db_name; + tbl_name = other404.tbl_name; + return *this; +} +void DefaultConstraintsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "DefaultConstraintsRequest("; + out << "db_name=" << to_string(db_name); + out << ", " << "tbl_name=" << to_string(tbl_name); + out << ")"; +} + + +DefaultConstraintsResponse::~DefaultConstraintsResponse() throw() { +} + + +void DefaultConstraintsResponse::__set_defaultConstraints(const std::vector & val) { + this->defaultConstraints = val; +} + +uint32_t DefaultConstraintsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_defaultConstraints = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->defaultConstraints.clear(); + uint32_t _size405; + ::apache::thrift::protocol::TType _etype408; + xfer += iprot->readListBegin(_etype408, _size405); + this->defaultConstraints.resize(_size405); + uint32_t _i409; + for (_i409 = 0; _i409 < _size405; ++_i409) + { + xfer += this->defaultConstraints[_i409].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_defaultConstraints = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_defaultConstraints) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t DefaultConstraintsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("DefaultConstraintsResponse"); + + xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); + std::vector ::const_iterator _iter410; + for (_iter410 = this->defaultConstraints.begin(); _iter410 != this->defaultConstraints.end(); ++_iter410) + { + xfer += (*_iter410).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(DefaultConstraintsResponse &a, DefaultConstraintsResponse &b) { + using ::std::swap; + swap(a.defaultConstraints, 
b.defaultConstraints); +} + +DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other411) { + defaultConstraints = other411.defaultConstraints; +} +DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other412) { + defaultConstraints = other412.defaultConstraints; + return *this; +} +void DefaultConstraintsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "DefaultConstraintsResponse("; + out << "defaultConstraints=" << to_string(defaultConstraints); + out << ")"; +} + + DropConstraintRequest::~DropConstraintRequest() throw() { } @@ -10082,15 +10523,15 @@ void swap(DropConstraintRequest &a, DropConstraintRequest &b) { swap(a.constraintname, b.constraintname); } -DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other401) { - dbname = other401.dbname; - tablename = other401.tablename; - constraintname = other401.constraintname; +DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other413) { + dbname = other413.dbname; + tablename = other413.tablename; + constraintname = other413.constraintname; } -DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other402) { - dbname = other402.dbname; - tablename = other402.tablename; - constraintname = other402.constraintname; +DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other414) { + dbname = other414.dbname; + tablename = other414.tablename; + constraintname = other414.constraintname; return *this; } void DropConstraintRequest::printTo(std::ostream& out) const { @@ -10137,14 +10578,14 @@ uint32_t AddPrimaryKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeyCols.clear(); - uint32_t _size403; - ::apache::thrift::protocol::TType _etype406; - xfer += iprot->readListBegin(_etype406, _size403); - this->primaryKeyCols.resize(_size403); - uint32_t _i407; - for (_i407 = 0; _i407 < _size403; ++_i407) + uint32_t _size415; + ::apache::thrift::protocol::TType _etype418; + xfer += iprot->readListBegin(_etype418, _size415); + this->primaryKeyCols.resize(_size415); + uint32_t _i419; + for (_i419 = 0; _i419 < _size415; ++_i419) { - xfer += this->primaryKeyCols[_i407].read(iprot); + xfer += this->primaryKeyCols[_i419].read(iprot); } xfer += iprot->readListEnd(); } @@ -10175,10 +10616,10 @@ uint32_t AddPrimaryKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("primaryKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeyCols.size())); - std::vector ::const_iterator _iter408; - for (_iter408 = this->primaryKeyCols.begin(); _iter408 != this->primaryKeyCols.end(); ++_iter408) + std::vector ::const_iterator _iter420; + for (_iter420 = this->primaryKeyCols.begin(); _iter420 != this->primaryKeyCols.end(); ++_iter420) { - xfer += (*_iter408).write(oprot); + xfer += (*_iter420).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10194,11 +10635,11 @@ void swap(AddPrimaryKeyRequest &a, AddPrimaryKeyRequest &b) { swap(a.primaryKeyCols, b.primaryKeyCols); } -AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other409) { - primaryKeyCols = other409.primaryKeyCols; +AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other421) { + primaryKeyCols = other421.primaryKeyCols; } 
-AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other410) { - primaryKeyCols = other410.primaryKeyCols; +AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other422) { + primaryKeyCols = other422.primaryKeyCols; return *this; } void AddPrimaryKeyRequest::printTo(std::ostream& out) const { @@ -10243,14 +10684,14 @@ uint32_t AddForeignKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeyCols.clear(); - uint32_t _size411; - ::apache::thrift::protocol::TType _etype414; - xfer += iprot->readListBegin(_etype414, _size411); - this->foreignKeyCols.resize(_size411); - uint32_t _i415; - for (_i415 = 0; _i415 < _size411; ++_i415) + uint32_t _size423; + ::apache::thrift::protocol::TType _etype426; + xfer += iprot->readListBegin(_etype426, _size423); + this->foreignKeyCols.resize(_size423); + uint32_t _i427; + for (_i427 = 0; _i427 < _size423; ++_i427) { - xfer += this->foreignKeyCols[_i415].read(iprot); + xfer += this->foreignKeyCols[_i427].read(iprot); } xfer += iprot->readListEnd(); } @@ -10281,10 +10722,10 @@ uint32_t AddForeignKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("foreignKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeyCols.size())); - std::vector ::const_iterator _iter416; - for (_iter416 = this->foreignKeyCols.begin(); _iter416 != this->foreignKeyCols.end(); ++_iter416) + std::vector ::const_iterator _iter428; + for (_iter428 = this->foreignKeyCols.begin(); _iter428 != this->foreignKeyCols.end(); ++_iter428) { - xfer += (*_iter416).write(oprot); + xfer += (*_iter428).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10300,11 +10741,11 @@ void swap(AddForeignKeyRequest &a, AddForeignKeyRequest &b) { swap(a.foreignKeyCols, b.foreignKeyCols); } -AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other417) { - foreignKeyCols = other417.foreignKeyCols; +AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other429) { + foreignKeyCols = other429.foreignKeyCols; } -AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other418) { - foreignKeyCols = other418.foreignKeyCols; +AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other430) { + foreignKeyCols = other430.foreignKeyCols; return *this; } void AddForeignKeyRequest::printTo(std::ostream& out) const { @@ -10315,15 +10756,121 @@ void AddForeignKeyRequest::printTo(std::ostream& out) const { } -AddUniqueConstraintRequest::~AddUniqueConstraintRequest() throw() { +AddUniqueConstraintRequest::~AddUniqueConstraintRequest() throw() { +} + + +void AddUniqueConstraintRequest::__set_uniqueConstraintCols(const std::vector & val) { + this->uniqueConstraintCols = val; +} + +uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_uniqueConstraintCols = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == 
::apache::thrift::protocol::T_LIST) { + { + this->uniqueConstraintCols.clear(); + uint32_t _size431; + ::apache::thrift::protocol::TType _etype434; + xfer += iprot->readListBegin(_etype434, _size431); + this->uniqueConstraintCols.resize(_size431); + uint32_t _i435; + for (_i435 = 0; _i435 < _size431; ++_i435) + { + xfer += this->uniqueConstraintCols[_i435].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_uniqueConstraintCols = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_uniqueConstraintCols) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("AddUniqueConstraintRequest"); + + xfer += oprot->writeFieldBegin("uniqueConstraintCols", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraintCols.size())); + std::vector ::const_iterator _iter436; + for (_iter436 = this->uniqueConstraintCols.begin(); _iter436 != this->uniqueConstraintCols.end(); ++_iter436) + { + xfer += (*_iter436).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(AddUniqueConstraintRequest &a, AddUniqueConstraintRequest &b) { + using ::std::swap; + swap(a.uniqueConstraintCols, b.uniqueConstraintCols); +} + +AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other437) { + uniqueConstraintCols = other437.uniqueConstraintCols; +} +AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other438) { + uniqueConstraintCols = other438.uniqueConstraintCols; + return *this; +} +void AddUniqueConstraintRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "AddUniqueConstraintRequest("; + out << "uniqueConstraintCols=" << to_string(uniqueConstraintCols); + out << ")"; +} + + +AddNotNullConstraintRequest::~AddNotNullConstraintRequest() throw() { } -void AddUniqueConstraintRequest::__set_uniqueConstraintCols(const std::vector & val) { - this->uniqueConstraintCols = val; +void AddNotNullConstraintRequest::__set_notNullConstraintCols(const std::vector & val) { + this->notNullConstraintCols = val; } -uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -10335,7 +10882,7 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* using ::apache::thrift::protocol::TProtocolException; - bool isset_uniqueConstraintCols = false; + bool isset_notNullConstraintCols = false; while (true) { @@ -10348,19 +10895,19 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* case 1: if (ftype == ::apache::thrift::protocol::T_LIST) { { - this->uniqueConstraintCols.clear(); - uint32_t _size419; - ::apache::thrift::protocol::TType _etype422; - xfer += iprot->readListBegin(_etype422, _size419); - this->uniqueConstraintCols.resize(_size419); 
- uint32_t _i423; - for (_i423 = 0; _i423 < _size419; ++_i423) + this->notNullConstraintCols.clear(); + uint32_t _size439; + ::apache::thrift::protocol::TType _etype442; + xfer += iprot->readListBegin(_etype442, _size439); + this->notNullConstraintCols.resize(_size439); + uint32_t _i443; + for (_i443 = 0; _i443 < _size439; ++_i443) { - xfer += this->uniqueConstraintCols[_i423].read(iprot); + xfer += this->notNullConstraintCols[_i443].read(iprot); } xfer += iprot->readListEnd(); } - isset_uniqueConstraintCols = true; + isset_notNullConstraintCols = true; } else { xfer += iprot->skip(ftype); } @@ -10374,23 +10921,23 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->readStructEnd(); - if (!isset_uniqueConstraintCols) + if (!isset_notNullConstraintCols) throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } -uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("AddUniqueConstraintRequest"); + xfer += oprot->writeStructBegin("AddNotNullConstraintRequest"); - xfer += oprot->writeFieldBegin("uniqueConstraintCols", ::apache::thrift::protocol::T_LIST, 1); + xfer += oprot->writeFieldBegin("notNullConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraintCols.size())); - std::vector ::const_iterator _iter424; - for (_iter424 = this->uniqueConstraintCols.begin(); _iter424 != this->uniqueConstraintCols.end(); ++_iter424) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraintCols.size())); + std::vector ::const_iterator _iter444; + for (_iter444 = this->notNullConstraintCols.begin(); _iter444 != this->notNullConstraintCols.end(); ++_iter444) { - xfer += (*_iter424).write(oprot); + xfer += (*_iter444).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10401,35 +10948,35 @@ uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol return xfer; } -void swap(AddUniqueConstraintRequest &a, AddUniqueConstraintRequest &b) { +void swap(AddNotNullConstraintRequest &a, AddNotNullConstraintRequest &b) { using ::std::swap; - swap(a.uniqueConstraintCols, b.uniqueConstraintCols); + swap(a.notNullConstraintCols, b.notNullConstraintCols); } -AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other425) { - uniqueConstraintCols = other425.uniqueConstraintCols; +AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other445) { + notNullConstraintCols = other445.notNullConstraintCols; } -AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other426) { - uniqueConstraintCols = other426.uniqueConstraintCols; +AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other446) { + notNullConstraintCols = other446.notNullConstraintCols; return *this; } -void AddUniqueConstraintRequest::printTo(std::ostream& out) const { +void AddNotNullConstraintRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "AddUniqueConstraintRequest("; - out << "uniqueConstraintCols=" << to_string(uniqueConstraintCols); + out << 
"AddNotNullConstraintRequest("; + out << "notNullConstraintCols=" << to_string(notNullConstraintCols); out << ")"; } -AddNotNullConstraintRequest::~AddNotNullConstraintRequest() throw() { +AddDefaultConstraintRequest::~AddDefaultConstraintRequest() throw() { } -void AddNotNullConstraintRequest::__set_notNullConstraintCols(const std::vector & val) { - this->notNullConstraintCols = val; +void AddDefaultConstraintRequest::__set_defaultConstraintCols(const std::vector & val) { + this->defaultConstraintCols = val; } -uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t AddDefaultConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -10441,7 +10988,7 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol using ::apache::thrift::protocol::TProtocolException; - bool isset_notNullConstraintCols = false; + bool isset_defaultConstraintCols = false; while (true) { @@ -10454,19 +11001,19 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol case 1: if (ftype == ::apache::thrift::protocol::T_LIST) { { - this->notNullConstraintCols.clear(); - uint32_t _size427; - ::apache::thrift::protocol::TType _etype430; - xfer += iprot->readListBegin(_etype430, _size427); - this->notNullConstraintCols.resize(_size427); - uint32_t _i431; - for (_i431 = 0; _i431 < _size427; ++_i431) + this->defaultConstraintCols.clear(); + uint32_t _size447; + ::apache::thrift::protocol::TType _etype450; + xfer += iprot->readListBegin(_etype450, _size447); + this->defaultConstraintCols.resize(_size447); + uint32_t _i451; + for (_i451 = 0; _i451 < _size447; ++_i451) { - xfer += this->notNullConstraintCols[_i431].read(iprot); + xfer += this->defaultConstraintCols[_i451].read(iprot); } xfer += iprot->readListEnd(); } - isset_notNullConstraintCols = true; + isset_defaultConstraintCols = true; } else { xfer += iprot->skip(ftype); } @@ -10480,23 +11027,23 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol xfer += iprot->readStructEnd(); - if (!isset_notNullConstraintCols) + if (!isset_defaultConstraintCols) throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } -uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t AddDefaultConstraintRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("AddNotNullConstraintRequest"); + xfer += oprot->writeStructBegin("AddDefaultConstraintRequest"); - xfer += oprot->writeFieldBegin("notNullConstraintCols", ::apache::thrift::protocol::T_LIST, 1); + xfer += oprot->writeFieldBegin("defaultConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraintCols.size())); - std::vector ::const_iterator _iter432; - for (_iter432 = this->notNullConstraintCols.begin(); _iter432 != this->notNullConstraintCols.end(); ++_iter432) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraintCols.size())); + std::vector ::const_iterator _iter452; + for (_iter452 = this->defaultConstraintCols.begin(); _iter452 != this->defaultConstraintCols.end(); ++_iter452) { - xfer += (*_iter432).write(oprot); + xfer += (*_iter452).write(oprot); } 
xfer += oprot->writeListEnd(); } @@ -10507,22 +11054,22 @@ uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtoco return xfer; } -void swap(AddNotNullConstraintRequest &a, AddNotNullConstraintRequest &b) { +void swap(AddDefaultConstraintRequest &a, AddDefaultConstraintRequest &b) { using ::std::swap; - swap(a.notNullConstraintCols, b.notNullConstraintCols); + swap(a.defaultConstraintCols, b.defaultConstraintCols); } -AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other433) { - notNullConstraintCols = other433.notNullConstraintCols; +AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other453) { + defaultConstraintCols = other453.defaultConstraintCols; } -AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other434) { - notNullConstraintCols = other434.notNullConstraintCols; +AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other454) { + defaultConstraintCols = other454.defaultConstraintCols; return *this; } -void AddNotNullConstraintRequest::printTo(std::ostream& out) const { +void AddDefaultConstraintRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "AddNotNullConstraintRequest("; - out << "notNullConstraintCols=" << to_string(notNullConstraintCols); + out << "AddDefaultConstraintRequest("; + out << "defaultConstraintCols=" << to_string(defaultConstraintCols); out << ")"; } @@ -10566,14 +11113,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size435; - ::apache::thrift::protocol::TType _etype438; - xfer += iprot->readListBegin(_etype438, _size435); - this->partitions.resize(_size435); - uint32_t _i439; - for (_i439 = 0; _i439 < _size435; ++_i439) + uint32_t _size455; + ::apache::thrift::protocol::TType _etype458; + xfer += iprot->readListBegin(_etype458, _size455); + this->partitions.resize(_size455); + uint32_t _i459; + for (_i459 = 0; _i459 < _size455; ++_i459) { - xfer += this->partitions[_i439].read(iprot); + xfer += this->partitions[_i459].read(iprot); } xfer += iprot->readListEnd(); } @@ -10614,10 +11161,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter440; - for (_iter440 = this->partitions.begin(); _iter440 != this->partitions.end(); ++_iter440) + std::vector ::const_iterator _iter460; + for (_iter460 = this->partitions.begin(); _iter460 != this->partitions.end(); ++_iter460) { - xfer += (*_iter440).write(oprot); + xfer += (*_iter460).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10638,13 +11185,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) { swap(a.hasUnknownPartitions, b.hasUnknownPartitions); } -PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other441) { - partitions = other441.partitions; - hasUnknownPartitions = other441.hasUnknownPartitions; +PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other461) { + partitions = other461.partitions; + hasUnknownPartitions = other461.hasUnknownPartitions; } -PartitionsByExprResult& 
PartitionsByExprResult::operator=(const PartitionsByExprResult& other442) { - partitions = other442.partitions; - hasUnknownPartitions = other442.hasUnknownPartitions; +PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other462) { + partitions = other462.partitions; + hasUnknownPartitions = other462.hasUnknownPartitions; return *this; } void PartitionsByExprResult::printTo(std::ostream& out) const { @@ -10806,21 +11353,21 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) { swap(a.__isset, b.__isset); } -PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other443) { - dbName = other443.dbName; - tblName = other443.tblName; - expr = other443.expr; - defaultPartitionName = other443.defaultPartitionName; - maxParts = other443.maxParts; - __isset = other443.__isset; -} -PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other444) { - dbName = other444.dbName; - tblName = other444.tblName; - expr = other444.expr; - defaultPartitionName = other444.defaultPartitionName; - maxParts = other444.maxParts; - __isset = other444.__isset; +PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other463) { + dbName = other463.dbName; + tblName = other463.tblName; + expr = other463.expr; + defaultPartitionName = other463.defaultPartitionName; + maxParts = other463.maxParts; + __isset = other463.__isset; +} +PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other464) { + dbName = other464.dbName; + tblName = other464.tblName; + expr = other464.expr; + defaultPartitionName = other464.defaultPartitionName; + maxParts = other464.maxParts; + __isset = other464.__isset; return *this; } void PartitionsByExprRequest::printTo(std::ostream& out) const { @@ -10869,14 +11416,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size445; - ::apache::thrift::protocol::TType _etype448; - xfer += iprot->readListBegin(_etype448, _size445); - this->tableStats.resize(_size445); - uint32_t _i449; - for (_i449 = 0; _i449 < _size445; ++_i449) + uint32_t _size465; + ::apache::thrift::protocol::TType _etype468; + xfer += iprot->readListBegin(_etype468, _size465); + this->tableStats.resize(_size465); + uint32_t _i469; + for (_i469 = 0; _i469 < _size465; ++_i469) { - xfer += this->tableStats[_i449].read(iprot); + xfer += this->tableStats[_i469].read(iprot); } xfer += iprot->readListEnd(); } @@ -10907,10 +11454,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); - std::vector ::const_iterator _iter450; - for (_iter450 = this->tableStats.begin(); _iter450 != this->tableStats.end(); ++_iter450) + std::vector ::const_iterator _iter470; + for (_iter470 = this->tableStats.begin(); _iter470 != this->tableStats.end(); ++_iter470) { - xfer += (*_iter450).write(oprot); + xfer += (*_iter470).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10926,11 +11473,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) { swap(a.tableStats, b.tableStats); } -TableStatsResult::TableStatsResult(const TableStatsResult& other451) { - tableStats = other451.tableStats; +TableStatsResult::TableStatsResult(const 
TableStatsResult& other471) { + tableStats = other471.tableStats; } -TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other452) { - tableStats = other452.tableStats; +TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other472) { + tableStats = other472.tableStats; return *this; } void TableStatsResult::printTo(std::ostream& out) const { @@ -10975,26 +11522,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size453; - ::apache::thrift::protocol::TType _ktype454; - ::apache::thrift::protocol::TType _vtype455; - xfer += iprot->readMapBegin(_ktype454, _vtype455, _size453); - uint32_t _i457; - for (_i457 = 0; _i457 < _size453; ++_i457) + uint32_t _size473; + ::apache::thrift::protocol::TType _ktype474; + ::apache::thrift::protocol::TType _vtype475; + xfer += iprot->readMapBegin(_ktype474, _vtype475, _size473); + uint32_t _i477; + for (_i477 = 0; _i477 < _size473; ++_i477) { - std::string _key458; - xfer += iprot->readString(_key458); - std::vector & _val459 = this->partStats[_key458]; + std::string _key478; + xfer += iprot->readString(_key478); + std::vector & _val479 = this->partStats[_key478]; { - _val459.clear(); - uint32_t _size460; - ::apache::thrift::protocol::TType _etype463; - xfer += iprot->readListBegin(_etype463, _size460); - _val459.resize(_size460); - uint32_t _i464; - for (_i464 = 0; _i464 < _size460; ++_i464) + _val479.clear(); + uint32_t _size480; + ::apache::thrift::protocol::TType _etype483; + xfer += iprot->readListBegin(_etype483, _size480); + _val479.resize(_size480); + uint32_t _i484; + for (_i484 = 0; _i484 < _size480; ++_i484) { - xfer += _val459[_i464].read(iprot); + xfer += _val479[_i484].read(iprot); } xfer += iprot->readListEnd(); } @@ -11028,16 +11575,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->partStats.size())); - std::map > ::const_iterator _iter465; - for (_iter465 = this->partStats.begin(); _iter465 != this->partStats.end(); ++_iter465) + std::map > ::const_iterator _iter485; + for (_iter485 = this->partStats.begin(); _iter485 != this->partStats.end(); ++_iter485) { - xfer += oprot->writeString(_iter465->first); + xfer += oprot->writeString(_iter485->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter465->second.size())); - std::vector ::const_iterator _iter466; - for (_iter466 = _iter465->second.begin(); _iter466 != _iter465->second.end(); ++_iter466) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter485->second.size())); + std::vector ::const_iterator _iter486; + for (_iter486 = _iter485->second.begin(); _iter486 != _iter485->second.end(); ++_iter486) { - xfer += (*_iter466).write(oprot); + xfer += (*_iter486).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11056,11 +11603,11 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { swap(a.partStats, b.partStats); } -PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other467) { - partStats = other467.partStats; +PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other487) { + partStats = other487.partStats; } -PartitionsStatsResult& 
PartitionsStatsResult::operator=(const PartitionsStatsResult& other468) { - partStats = other468.partStats; +PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other488) { + partStats = other488.partStats; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { @@ -11131,14 +11678,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size469; - ::apache::thrift::protocol::TType _etype472; - xfer += iprot->readListBegin(_etype472, _size469); - this->colNames.resize(_size469); - uint32_t _i473; - for (_i473 = 0; _i473 < _size469; ++_i473) + uint32_t _size489; + ::apache::thrift::protocol::TType _etype492; + xfer += iprot->readListBegin(_etype492, _size489); + this->colNames.resize(_size489); + uint32_t _i493; + for (_i493 = 0; _i493 < _size489; ++_i493) { - xfer += iprot->readString(this->colNames[_i473]); + xfer += iprot->readString(this->colNames[_i493]); } xfer += iprot->readListEnd(); } @@ -11181,10 +11728,10 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter474; - for (_iter474 = this->colNames.begin(); _iter474 != this->colNames.end(); ++_iter474) + std::vector ::const_iterator _iter494; + for (_iter494 = this->colNames.begin(); _iter494 != this->colNames.end(); ++_iter494) { - xfer += oprot->writeString((*_iter474)); + xfer += oprot->writeString((*_iter494)); } xfer += oprot->writeListEnd(); } @@ -11202,15 +11749,15 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.colNames, b.colNames); } -TableStatsRequest::TableStatsRequest(const TableStatsRequest& other475) { - dbName = other475.dbName; - tblName = other475.tblName; - colNames = other475.colNames; +TableStatsRequest::TableStatsRequest(const TableStatsRequest& other495) { + dbName = other495.dbName; + tblName = other495.tblName; + colNames = other495.colNames; } -TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other476) { - dbName = other476.dbName; - tblName = other476.tblName; - colNames = other476.colNames; +TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other496) { + dbName = other496.dbName; + tblName = other496.tblName; + colNames = other496.colNames; return *this; } void TableStatsRequest::printTo(std::ostream& out) const { @@ -11288,14 +11835,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size477; - ::apache::thrift::protocol::TType _etype480; - xfer += iprot->readListBegin(_etype480, _size477); - this->colNames.resize(_size477); - uint32_t _i481; - for (_i481 = 0; _i481 < _size477; ++_i481) + uint32_t _size497; + ::apache::thrift::protocol::TType _etype500; + xfer += iprot->readListBegin(_etype500, _size497); + this->colNames.resize(_size497); + uint32_t _i501; + for (_i501 = 0; _i501 < _size497; ++_i501) { - xfer += iprot->readString(this->colNames[_i481]); + xfer += iprot->readString(this->colNames[_i501]); } xfer += iprot->readListEnd(); } @@ -11308,14 +11855,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->partNames.clear(); - uint32_t _size482; - ::apache::thrift::protocol::TType _etype485; - xfer += iprot->readListBegin(_etype485, _size482); - this->partNames.resize(_size482); - uint32_t _i486; - for (_i486 = 0; _i486 < _size482; ++_i486) + uint32_t _size502; + ::apache::thrift::protocol::TType _etype505; + xfer += iprot->readListBegin(_etype505, _size502); + this->partNames.resize(_size502); + uint32_t _i506; + for (_i506 = 0; _i506 < _size502; ++_i506) { - xfer += iprot->readString(this->partNames[_i486]); + xfer += iprot->readString(this->partNames[_i506]); } xfer += iprot->readListEnd(); } @@ -11360,10 +11907,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter487; - for (_iter487 = this->colNames.begin(); _iter487 != this->colNames.end(); ++_iter487) + std::vector ::const_iterator _iter507; + for (_iter507 = this->colNames.begin(); _iter507 != this->colNames.end(); ++_iter507) { - xfer += oprot->writeString((*_iter487)); + xfer += oprot->writeString((*_iter507)); } xfer += oprot->writeListEnd(); } @@ -11372,10 +11919,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter488; - for (_iter488 = this->partNames.begin(); _iter488 != this->partNames.end(); ++_iter488) + std::vector ::const_iterator _iter508; + for (_iter508 = this->partNames.begin(); _iter508 != this->partNames.end(); ++_iter508) { - xfer += oprot->writeString((*_iter488)); + xfer += oprot->writeString((*_iter508)); } xfer += oprot->writeListEnd(); } @@ -11394,17 +11941,17 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.partNames, b.partNames); } -PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other489) { - dbName = other489.dbName; - tblName = other489.tblName; - colNames = other489.colNames; - partNames = other489.partNames; +PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other509) { + dbName = other509.dbName; + tblName = other509.tblName; + colNames = other509.colNames; + partNames = other509.partNames; } -PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other490) { - dbName = other490.dbName; - tblName = other490.tblName; - colNames = other490.colNames; - partNames = other490.partNames; +PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other510) { + dbName = other510.dbName; + tblName = other510.tblName; + colNames = other510.colNames; + partNames = other510.partNames; return *this; } void PartitionsStatsRequest::printTo(std::ostream& out) const { @@ -11452,14 +11999,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size491; - ::apache::thrift::protocol::TType _etype494; - xfer += iprot->readListBegin(_etype494, _size491); - this->partitions.resize(_size491); - uint32_t _i495; - for (_i495 = 0; _i495 < _size491; ++_i495) + uint32_t _size511; + ::apache::thrift::protocol::TType _etype514; + xfer += 
iprot->readListBegin(_etype514, _size511); + this->partitions.resize(_size511); + uint32_t _i515; + for (_i515 = 0; _i515 < _size511; ++_i515) { - xfer += this->partitions[_i495].read(iprot); + xfer += this->partitions[_i515].read(iprot); } xfer += iprot->readListEnd(); } @@ -11489,10 +12036,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size())); - std::vector<Partition> ::const_iterator _iter496; - for (_iter496 = this->partitions.begin(); _iter496 != this->partitions.end(); ++_iter496) + std::vector<Partition> ::const_iterator _iter516; + for (_iter516 = this->partitions.begin(); _iter516 != this->partitions.end(); ++_iter516) { - xfer += (*_iter496).write(oprot); + xfer += (*_iter516).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11509,13 +12056,13 @@ void swap(AddPartitionsResult &a, AddPartitionsResult &b) { swap(a.__isset, b.__isset); } -AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other497) { - partitions = other497.partitions; - __isset = other497.__isset; +AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other517) { + partitions = other517.partitions; + __isset = other517.__isset; } -AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other498) { - partitions = other498.partitions; - __isset = other498.__isset; +AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other518) { + partitions = other518.partitions; + __isset = other518.__isset; return *this; } void AddPartitionsResult::printTo(std::ostream& out) const { @@ -11596,14 +12143,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size499; - ::apache::thrift::protocol::TType _etype502; - xfer += iprot->readListBegin(_etype502, _size499); - this->parts.resize(_size499); - uint32_t _i503; - for (_i503 = 0; _i503 < _size499; ++_i503) + uint32_t _size519; + ::apache::thrift::protocol::TType _etype522; + xfer += iprot->readListBegin(_etype522, _size519); + this->parts.resize(_size519); + uint32_t _i523; + for (_i523 = 0; _i523 < _size519; ++_i523) { - xfer += this->parts[_i503].read(iprot); + xfer += this->parts[_i523].read(iprot); } xfer += iprot->readListEnd(); } @@ -11664,10 +12211,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->parts.size())); - std::vector<Partition> ::const_iterator _iter504; - for (_iter504 = this->parts.begin(); _iter504 != this->parts.end(); ++_iter504) + std::vector<Partition> ::const_iterator _iter524; + for (_iter524 = this->parts.begin(); _iter524 != this->parts.end(); ++_iter524) { - xfer += (*_iter504).write(oprot); + xfer += (*_iter524).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11697,21 +12244,21 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.__isset, b.__isset); } -AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other505) { - dbName = other505.dbName; - tblName = other505.tblName; - parts = other505.parts; - ifNotExists = other505.ifNotExists; - needResult = other505.needResult; - __isset = other505.__isset; -} -AddPartitionsRequest&
AddPartitionsRequest::operator=(const AddPartitionsRequest& other506) { - dbName = other506.dbName; - tblName = other506.tblName; - parts = other506.parts; - ifNotExists = other506.ifNotExists; - needResult = other506.needResult; - __isset = other506.__isset; +AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other525) { + dbName = other525.dbName; + tblName = other525.tblName; + parts = other525.parts; + ifNotExists = other525.ifNotExists; + needResult = other525.needResult; + __isset = other525.__isset; +} +AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other526) { + dbName = other526.dbName; + tblName = other526.tblName; + parts = other526.parts; + ifNotExists = other526.ifNotExists; + needResult = other526.needResult; + __isset = other526.__isset; return *this; } void AddPartitionsRequest::printTo(std::ostream& out) const { @@ -11760,14 +12307,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size507; - ::apache::thrift::protocol::TType _etype510; - xfer += iprot->readListBegin(_etype510, _size507); - this->partitions.resize(_size507); - uint32_t _i511; - for (_i511 = 0; _i511 < _size507; ++_i511) + uint32_t _size527; + ::apache::thrift::protocol::TType _etype530; + xfer += iprot->readListBegin(_etype530, _size527); + this->partitions.resize(_size527); + uint32_t _i531; + for (_i531 = 0; _i531 < _size527; ++_i531) { - xfer += this->partitions[_i511].read(iprot); + xfer += this->partitions[_i531].read(iprot); } xfer += iprot->readListEnd(); } @@ -11797,10 +12344,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size())); - std::vector<Partition> ::const_iterator _iter512; - for (_iter512 = this->partitions.begin(); _iter512 != this->partitions.end(); ++_iter512) + std::vector<Partition> ::const_iterator _iter532; + for (_iter532 = this->partitions.begin(); _iter532 != this->partitions.end(); ++_iter532) { - xfer += (*_iter512).write(oprot); + xfer += (*_iter532).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11817,13 +12364,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) { swap(a.__isset, b.__isset); } -DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other513) { - partitions = other513.partitions; - __isset = other513.__isset; +DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other533) { + partitions = other533.partitions; + __isset = other533.__isset; } -DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other514) { - partitions = other514.partitions; - __isset = other514.__isset; +DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other534) { + partitions = other534.partitions; + __isset = other534.__isset; return *this; } void DropPartitionsResult::printTo(std::ostream& out) const { @@ -11925,15 +12472,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) { swap(a.__isset, b.__isset); } -DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other515) { - expr = other515.expr; - partArchiveLevel = other515.partArchiveLevel; - __isset = other515.__isset; +DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other535) { + expr = other535.expr; +
partArchiveLevel = other535.partArchiveLevel; + __isset = other535.__isset; } -DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other516) { - expr = other516.expr; - partArchiveLevel = other516.partArchiveLevel; - __isset = other516.__isset; +DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other536) { + expr = other536.expr; + partArchiveLevel = other536.partArchiveLevel; + __isset = other536.__isset; return *this; } void DropPartitionsExpr::printTo(std::ostream& out) const { @@ -11982,14 +12529,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size517; - ::apache::thrift::protocol::TType _etype520; - xfer += iprot->readListBegin(_etype520, _size517); - this->names.resize(_size517); - uint32_t _i521; - for (_i521 = 0; _i521 < _size517; ++_i521) + uint32_t _size537; + ::apache::thrift::protocol::TType _etype540; + xfer += iprot->readListBegin(_etype540, _size537); + this->names.resize(_size537); + uint32_t _i541; + for (_i541 = 0; _i541 < _size537; ++_i541) { - xfer += iprot->readString(this->names[_i521]); + xfer += iprot->readString(this->names[_i541]); } xfer += iprot->readListEnd(); } @@ -12002,14 +12549,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size522; - ::apache::thrift::protocol::TType _etype525; - xfer += iprot->readListBegin(_etype525, _size522); - this->exprs.resize(_size522); - uint32_t _i526; - for (_i526 = 0; _i526 < _size522; ++_i526) + uint32_t _size542; + ::apache::thrift::protocol::TType _etype545; + xfer += iprot->readListBegin(_etype545, _size542); + this->exprs.resize(_size542); + uint32_t _i546; + for (_i546 = 0; _i546 < _size542; ++_i546) { - xfer += this->exprs[_i526].read(iprot); + xfer += this->exprs[_i546].read(iprot); } xfer += iprot->readListEnd(); } @@ -12038,10 +12585,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size())); - std::vector<std::string> ::const_iterator _iter527; - for (_iter527 = this->names.begin(); _iter527 != this->names.end(); ++_iter527) + std::vector<std::string> ::const_iterator _iter547; + for (_iter547 = this->names.begin(); _iter547 != this->names.end(); ++_iter547) { - xfer += oprot->writeString((*_iter527)); + xfer += oprot->writeString((*_iter547)); } xfer += oprot->writeListEnd(); } @@ -12050,10 +12597,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->exprs.size())); - std::vector<DropPartitionsExpr> ::const_iterator _iter528; - for (_iter528 = this->exprs.begin(); _iter528 != this->exprs.end(); ++_iter528) + std::vector<DropPartitionsExpr> ::const_iterator _iter548; + for (_iter548 = this->exprs.begin(); _iter548 != this->exprs.end(); ++_iter548) { - xfer += (*_iter528).write(oprot); + xfer += (*_iter548).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12071,15 +12618,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) { swap(a.__isset, b.__isset); } -RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other529) { - names = other529.names; - exprs = other529.exprs;
- __isset = other529.__isset; +RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other549) { + names = other549.names; + exprs = other549.exprs; + __isset = other549.__isset; } -RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other530) { - names = other530.names; - exprs = other530.exprs; - __isset = other530.__isset; +RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other550) { + names = other550.names; + exprs = other550.exprs; + __isset = other550.__isset; return *this; } void RequestPartsSpec::printTo(std::ostream& out) const { @@ -12298,27 +12845,27 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) { swap(a.__isset, b.__isset); } -DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other531) { - dbName = other531.dbName; - tblName = other531.tblName; - parts = other531.parts; - deleteData = other531.deleteData; - ifExists = other531.ifExists; - ignoreProtection = other531.ignoreProtection; - environmentContext = other531.environmentContext; - needResult = other531.needResult; - __isset = other531.__isset; -} -DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other532) { - dbName = other532.dbName; - tblName = other532.tblName; - parts = other532.parts; - deleteData = other532.deleteData; - ifExists = other532.ifExists; - ignoreProtection = other532.ignoreProtection; - environmentContext = other532.environmentContext; - needResult = other532.needResult; - __isset = other532.__isset; +DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other551) { + dbName = other551.dbName; + tblName = other551.tblName; + parts = other551.parts; + deleteData = other551.deleteData; + ifExists = other551.ifExists; + ignoreProtection = other551.ignoreProtection; + environmentContext = other551.environmentContext; + needResult = other551.needResult; + __isset = other551.__isset; +} +DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other552) { + dbName = other552.dbName; + tblName = other552.tblName; + parts = other552.parts; + deleteData = other552.deleteData; + ifExists = other552.ifExists; + ignoreProtection = other552.ignoreProtection; + environmentContext = other552.environmentContext; + needResult = other552.needResult; + __isset = other552.__isset; return *this; } void DropPartitionsRequest::printTo(std::ostream& out) const { @@ -12421,14 +12968,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size533; - ::apache::thrift::protocol::TType _etype536; - xfer += iprot->readListBegin(_etype536, _size533); - this->partitionKeys.resize(_size533); - uint32_t _i537; - for (_i537 = 0; _i537 < _size533; ++_i537) + uint32_t _size553; + ::apache::thrift::protocol::TType _etype556; + xfer += iprot->readListBegin(_etype556, _size553); + this->partitionKeys.resize(_size553); + uint32_t _i557; + for (_i557 = 0; _i557 < _size553; ++_i557) { - xfer += this->partitionKeys[_i537].read(iprot); + xfer += this->partitionKeys[_i557].read(iprot); } xfer += iprot->readListEnd(); } @@ -12457,14 +13004,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionOrder.clear(); - uint32_t _size538; - ::apache::thrift::protocol::TType _etype541; - xfer += iprot->readListBegin(_etype541, _size538); - 
this->partitionOrder.resize(_size538); - uint32_t _i542; - for (_i542 = 0; _i542 < _size538; ++_i542) + uint32_t _size558; + ::apache::thrift::protocol::TType _etype561; + xfer += iprot->readListBegin(_etype561, _size558); + this->partitionOrder.resize(_size558); + uint32_t _i562; + for (_i562 = 0; _i562 < _size558; ++_i562) { - xfer += this->partitionOrder[_i542].read(iprot); + xfer += this->partitionOrder[_i562].read(iprot); } xfer += iprot->readListEnd(); } @@ -12523,10 +13070,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size())); - std::vector<FieldSchema> ::const_iterator _iter543; - for (_iter543 = this->partitionKeys.begin(); _iter543 != this->partitionKeys.end(); ++_iter543) + std::vector<FieldSchema> ::const_iterator _iter563; + for (_iter563 = this->partitionKeys.begin(); _iter563 != this->partitionKeys.end(); ++_iter563) { - xfer += (*_iter543).write(oprot); + xfer += (*_iter563).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12546,10 +13093,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionOrder", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionOrder.size())); - std::vector<FieldSchema> ::const_iterator _iter544; - for (_iter544 = this->partitionOrder.begin(); _iter544 != this->partitionOrder.end(); ++_iter544) + std::vector<FieldSchema> ::const_iterator _iter564; + for (_iter564 = this->partitionOrder.begin(); _iter564 != this->partitionOrder.end(); ++_iter564) { - xfer += (*_iter544).write(oprot); + xfer += (*_iter564).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12583,27 +13130,27 @@ void swap(PartitionValuesRequest &a, PartitionValuesRequest &b) { swap(a.__isset, b.__isset); } -PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other545) { - dbName = other545.dbName; - tblName = other545.tblName; - partitionKeys = other545.partitionKeys; - applyDistinct = other545.applyDistinct; - filter = other545.filter; - partitionOrder = other545.partitionOrder; - ascending = other545.ascending; - maxParts = other545.maxParts; - __isset = other545.__isset; -} -PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other546) { - dbName = other546.dbName; - tblName = other546.tblName; - partitionKeys = other546.partitionKeys; - applyDistinct = other546.applyDistinct; - filter = other546.filter; - partitionOrder = other546.partitionOrder; - ascending = other546.ascending; - maxParts = other546.maxParts; - __isset = other546.__isset; +PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other565) { + dbName = other565.dbName; + tblName = other565.tblName; + partitionKeys = other565.partitionKeys; + applyDistinct = other565.applyDistinct; + filter = other565.filter; + partitionOrder = other565.partitionOrder; + ascending = other565.ascending; + maxParts = other565.maxParts; + __isset = other565.__isset; +} +PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other566) { + dbName = other566.dbName; + tblName = other566.tblName; + partitionKeys = other566.partitionKeys; + applyDistinct = other566.applyDistinct; + filter = other566.filter; + partitionOrder = other566.partitionOrder; + ascending = other566.ascending; +
maxParts = other566.maxParts; + __isset = other566.__isset; return *this; } void PartitionValuesRequest::printTo(std::ostream& out) const { @@ -12655,14 +13202,14 @@ uint32_t PartitionValuesRow::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->row.clear(); - uint32_t _size547; - ::apache::thrift::protocol::TType _etype550; - xfer += iprot->readListBegin(_etype550, _size547); - this->row.resize(_size547); - uint32_t _i551; - for (_i551 = 0; _i551 < _size547; ++_i551) + uint32_t _size567; + ::apache::thrift::protocol::TType _etype570; + xfer += iprot->readListBegin(_etype570, _size567); + this->row.resize(_size567); + uint32_t _i571; + for (_i571 = 0; _i571 < _size567; ++_i571) { - xfer += iprot->readString(this->row[_i551]); + xfer += iprot->readString(this->row[_i571]); } xfer += iprot->readListEnd(); } @@ -12693,10 +13240,10 @@ uint32_t PartitionValuesRow::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("row", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->row.size())); - std::vector<std::string> ::const_iterator _iter552; - for (_iter552 = this->row.begin(); _iter552 != this->row.end(); ++_iter552) + std::vector<std::string> ::const_iterator _iter572; + for (_iter572 = this->row.begin(); _iter572 != this->row.end(); ++_iter572) { - xfer += oprot->writeString((*_iter552)); + xfer += oprot->writeString((*_iter572)); } xfer += oprot->writeListEnd(); } @@ -12712,11 +13259,11 @@ void swap(PartitionValuesRow &a, PartitionValuesRow &b) { swap(a.row, b.row); } -PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other553) { - row = other553.row; +PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other573) { + row = other573.row; } -PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other554) { - row = other554.row; +PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other574) { + row = other574.row; return *this; } void PartitionValuesRow::printTo(std::ostream& out) const { @@ -12761,14 +13308,14 @@ uint32_t PartitionValuesResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionValues.clear(); - uint32_t _size555; - ::apache::thrift::protocol::TType _etype558; - xfer += iprot->readListBegin(_etype558, _size555); - this->partitionValues.resize(_size555); - uint32_t _i559; - for (_i559 = 0; _i559 < _size555; ++_i559) + uint32_t _size575; + ::apache::thrift::protocol::TType _etype578; + xfer += iprot->readListBegin(_etype578, _size575); + this->partitionValues.resize(_size575); + uint32_t _i579; + for (_i579 = 0; _i579 < _size575; ++_i579) { - xfer += this->partitionValues[_i559].read(iprot); + xfer += this->partitionValues[_i579].read(iprot); } xfer += iprot->readListEnd(); } @@ -12799,10 +13346,10 @@ uint32_t PartitionValuesResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("partitionValues", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionValues.size())); - std::vector<PartitionValuesRow> ::const_iterator _iter560; - for (_iter560 = this->partitionValues.begin(); _iter560 != this->partitionValues.end(); ++_iter560) + std::vector<PartitionValuesRow> ::const_iterator _iter580; + for (_iter580 = this->partitionValues.begin(); _iter580 != this->partitionValues.end(); ++_iter580) { - xfer += (*_iter560).write(oprot); + xfer
+= (*_iter580).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12818,11 +13365,11 @@ void swap(PartitionValuesResponse &a, PartitionValuesResponse &b) { swap(a.partitionValues, b.partitionValues); } -PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other561) { - partitionValues = other561.partitionValues; +PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other581) { + partitionValues = other581.partitionValues; } -PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other562) { - partitionValues = other562.partitionValues; +PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other582) { + partitionValues = other582.partitionValues; return *this; } void PartitionValuesResponse::printTo(std::ostream& out) const { @@ -12868,9 +13415,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast563; - xfer += iprot->readI32(ecast563); - this->resourceType = (ResourceType::type)ecast563; + int32_t ecast583; + xfer += iprot->readI32(ecast583); + this->resourceType = (ResourceType::type)ecast583; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -12921,15 +13468,15 @@ void swap(ResourceUri &a, ResourceUri &b) { swap(a.__isset, b.__isset); } -ResourceUri::ResourceUri(const ResourceUri& other564) { - resourceType = other564.resourceType; - uri = other564.uri; - __isset = other564.__isset; +ResourceUri::ResourceUri(const ResourceUri& other584) { + resourceType = other584.resourceType; + uri = other584.uri; + __isset = other584.__isset; } -ResourceUri& ResourceUri::operator=(const ResourceUri& other565) { - resourceType = other565.resourceType; - uri = other565.uri; - __isset = other565.__isset; +ResourceUri& ResourceUri::operator=(const ResourceUri& other585) { + resourceType = other585.resourceType; + uri = other585.uri; + __isset = other585.__isset; return *this; } void ResourceUri::printTo(std::ostream& out) const { @@ -13032,9 +13579,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast566; - xfer += iprot->readI32(ecast566); - this->ownerType = (PrincipalType::type)ecast566; + int32_t ecast586; + xfer += iprot->readI32(ecast586); + this->ownerType = (PrincipalType::type)ecast586; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -13050,9 +13597,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast567; - xfer += iprot->readI32(ecast567); - this->functionType = (FunctionType::type)ecast567; + int32_t ecast587; + xfer += iprot->readI32(ecast587); + this->functionType = (FunctionType::type)ecast587; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -13062,14 +13609,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size568; - ::apache::thrift::protocol::TType _etype571; - xfer += iprot->readListBegin(_etype571, _size568); - this->resourceUris.resize(_size568); - uint32_t _i572; - for (_i572 = 0; _i572 < _size568; ++_i572) + uint32_t _size588; + ::apache::thrift::protocol::TType _etype591; + xfer += iprot->readListBegin(_etype591, _size588); + 
this->resourceUris.resize(_size588); + uint32_t _i592; + for (_i592 = 0; _i592 < _size588; ++_i592) { - xfer += this->resourceUris[_i572].read(iprot); + xfer += this->resourceUris[_i592].read(iprot); } xfer += iprot->readListEnd(); } @@ -13126,10 +13673,10 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourceUris.size())); - std::vector<ResourceUri> ::const_iterator _iter573; - for (_iter573 = this->resourceUris.begin(); _iter573 != this->resourceUris.end(); ++_iter573) + std::vector<ResourceUri> ::const_iterator _iter593; + for (_iter593 = this->resourceUris.begin(); _iter593 != this->resourceUris.end(); ++_iter593) { - xfer += (*_iter573).write(oprot); + xfer += (*_iter593).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13153,27 +13700,27 @@ void swap(Function &a, Function &b) { swap(a.__isset, b.__isset); } -Function::Function(const Function& other574) { - functionName = other574.functionName; - dbName = other574.dbName; - className = other574.className; - ownerName = other574.ownerName; - ownerType = other574.ownerType; - createTime = other574.createTime; - functionType = other574.functionType; - resourceUris = other574.resourceUris; - __isset = other574.__isset; -} -Function& Function::operator=(const Function& other575) { - functionName = other575.functionName; - dbName = other575.dbName; - className = other575.className; - ownerName = other575.ownerName; - ownerType = other575.ownerType; - createTime = other575.createTime; - functionType = other575.functionType; - resourceUris = other575.resourceUris; - __isset = other575.__isset; +Function::Function(const Function& other594) { + functionName = other594.functionName; + dbName = other594.dbName; + className = other594.className; + ownerName = other594.ownerName; + ownerType = other594.ownerType; + createTime = other594.createTime; + functionType = other594.functionType; + resourceUris = other594.resourceUris; + __isset = other594.__isset; +} +Function& Function::operator=(const Function& other595) { + functionName = other595.functionName; + dbName = other595.dbName; + className = other595.className; + ownerName = other595.ownerName; + ownerType = other595.ownerType; + createTime = other595.createTime; + functionType = other595.functionType; + resourceUris = other595.resourceUris; + __isset = other595.__isset; return *this; } void Function::printTo(std::ostream& out) const { @@ -13271,9 +13818,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast576; - xfer += iprot->readI32(ecast576); - this->state = (TxnState::type)ecast576; + int32_t ecast596; + xfer += iprot->readI32(ecast596); + this->state = (TxnState::type)ecast596; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -13420,29 +13967,29 @@ void swap(TxnInfo &a, TxnInfo &b) { swap(a.__isset, b.__isset); } -TxnInfo::TxnInfo(const TxnInfo& other577) { - id = other577.id; - state = other577.state; - user = other577.user; - hostname = other577.hostname; - agentInfo = other577.agentInfo; - heartbeatCount = other577.heartbeatCount; - metaInfo = other577.metaInfo; - startedTime = other577.startedTime; - lastHeartbeatTime = other577.lastHeartbeatTime; - __isset = other577.__isset; -} -TxnInfo& TxnInfo::operator=(const TxnInfo& other578) { - id = other578.id; - state = other578.state; -
user = other578.user; - hostname = other578.hostname; - agentInfo = other578.agentInfo; - heartbeatCount = other578.heartbeatCount; - metaInfo = other578.metaInfo; - startedTime = other578.startedTime; - lastHeartbeatTime = other578.lastHeartbeatTime; - __isset = other578.__isset; +TxnInfo::TxnInfo(const TxnInfo& other597) { + id = other597.id; + state = other597.state; + user = other597.user; + hostname = other597.hostname; + agentInfo = other597.agentInfo; + heartbeatCount = other597.heartbeatCount; + metaInfo = other597.metaInfo; + startedTime = other597.startedTime; + lastHeartbeatTime = other597.lastHeartbeatTime; + __isset = other597.__isset; +} +TxnInfo& TxnInfo::operator=(const TxnInfo& other598) { + id = other598.id; + state = other598.state; + user = other598.user; + hostname = other598.hostname; + agentInfo = other598.agentInfo; + heartbeatCount = other598.heartbeatCount; + metaInfo = other598.metaInfo; + startedTime = other598.startedTime; + lastHeartbeatTime = other598.lastHeartbeatTime; + __isset = other598.__isset; return *this; } void TxnInfo::printTo(std::ostream& out) const { @@ -13508,14 +14055,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size579; - ::apache::thrift::protocol::TType _etype582; - xfer += iprot->readListBegin(_etype582, _size579); - this->open_txns.resize(_size579); - uint32_t _i583; - for (_i583 = 0; _i583 < _size579; ++_i583) + uint32_t _size599; + ::apache::thrift::protocol::TType _etype602; + xfer += iprot->readListBegin(_etype602, _size599); + this->open_txns.resize(_size599); + uint32_t _i603; + for (_i603 = 0; _i603 < _size599; ++_i603) { - xfer += this->open_txns[_i583].read(iprot); + xfer += this->open_txns[_i603].read(iprot); } xfer += iprot->readListEnd(); } @@ -13552,10 +14099,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->open_txns.size())); - std::vector<TxnInfo> ::const_iterator _iter584; - for (_iter584 = this->open_txns.begin(); _iter584 != this->open_txns.end(); ++_iter584) + std::vector<TxnInfo> ::const_iterator _iter604; + for (_iter604 = this->open_txns.begin(); _iter604 != this->open_txns.end(); ++_iter604) { - xfer += (*_iter584).write(oprot); + xfer += (*_iter604).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13572,13 +14119,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other585) { - txn_high_water_mark = other585.txn_high_water_mark; - open_txns = other585.open_txns; +GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other605) { + txn_high_water_mark = other605.txn_high_water_mark; + open_txns = other605.open_txns; } -GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other586) { - txn_high_water_mark = other586.txn_high_water_mark; - open_txns = other586.open_txns; +GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other606) { + txn_high_water_mark = other606.txn_high_water_mark; + open_txns = other606.open_txns; return *this; } void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const { @@ -13647,14 +14194,14 @@ uint32_t
GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size587; - ::apache::thrift::protocol::TType _etype590; - xfer += iprot->readListBegin(_etype590, _size587); - this->open_txns.resize(_size587); - uint32_t _i591; - for (_i591 = 0; _i591 < _size587; ++_i591) + uint32_t _size607; + ::apache::thrift::protocol::TType _etype610; + xfer += iprot->readListBegin(_etype610, _size607); + this->open_txns.resize(_size607); + uint32_t _i611; + for (_i611 = 0; _i611 < _size607; ++_i611) { - xfer += iprot->readI64(this->open_txns[_i591]); + xfer += iprot->readI64(this->open_txns[_i611]); } xfer += iprot->readListEnd(); } @@ -13709,10 +14256,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->open_txns.size())); - std::vector<int64_t> ::const_iterator _iter592; - for (_iter592 = this->open_txns.begin(); _iter592 != this->open_txns.end(); ++_iter592) + std::vector<int64_t> ::const_iterator _iter612; + for (_iter612 = this->open_txns.begin(); _iter612 != this->open_txns.end(); ++_iter612) { - xfer += oprot->writeI64((*_iter592)); + xfer += oprot->writeI64((*_iter612)); } xfer += oprot->writeListEnd(); } @@ -13741,19 +14288,19 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) { swap(a.__isset, b.__isset); } -GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other593) { - txn_high_water_mark = other593.txn_high_water_mark; - open_txns = other593.open_txns; - min_open_txn = other593.min_open_txn; - abortedBits = other593.abortedBits; - __isset = other593.__isset; +GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other613) { + txn_high_water_mark = other613.txn_high_water_mark; + open_txns = other613.open_txns; + min_open_txn = other613.min_open_txn; + abortedBits = other613.abortedBits; + __isset = other613.__isset; } -GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other594) { - txn_high_water_mark = other594.txn_high_water_mark; - open_txns = other594.open_txns; - min_open_txn = other594.min_open_txn; - abortedBits = other594.abortedBits; - __isset = other594.__isset; +GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other614) { + txn_high_water_mark = other614.txn_high_water_mark; + open_txns = other614.open_txns; + min_open_txn = other614.min_open_txn; + abortedBits = other614.abortedBits; + __isset = other614.__isset; return *this; } void GetOpenTxnsResponse::printTo(std::ostream& out) const { @@ -13898,19 +14445,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) { swap(a.__isset, b.__isset); } -OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other595) { - num_txns = other595.num_txns; - user = other595.user; - hostname = other595.hostname; - agentInfo = other595.agentInfo; - __isset = other595.__isset; +OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other615) { + num_txns = other615.num_txns; + user = other615.user; + hostname = other615.hostname; + agentInfo = other615.agentInfo; + __isset = other615.__isset; } -OpenTxnRequest& OpenTxnRequest::operator=(const
OpenTxnRequest& other616) { + num_txns = other616.num_txns; + user = other616.user; + hostname = other616.hostname; + agentInfo = other616.agentInfo; + __isset = other616.__isset; return *this; } void OpenTxnRequest::printTo(std::ostream& out) const { @@ -13958,14 +14505,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size597; - ::apache::thrift::protocol::TType _etype600; - xfer += iprot->readListBegin(_etype600, _size597); - this->txn_ids.resize(_size597); - uint32_t _i601; - for (_i601 = 0; _i601 < _size597; ++_i601) + uint32_t _size617; + ::apache::thrift::protocol::TType _etype620; + xfer += iprot->readListBegin(_etype620, _size617); + this->txn_ids.resize(_size617); + uint32_t _i621; + for (_i621 = 0; _i621 < _size617; ++_i621) { - xfer += iprot->readI64(this->txn_ids[_i601]); + xfer += iprot->readI64(this->txn_ids[_i621]); } xfer += iprot->readListEnd(); } @@ -13996,10 +14543,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size())); - std::vector<int64_t> ::const_iterator _iter602; - for (_iter602 = this->txn_ids.begin(); _iter602 != this->txn_ids.end(); ++_iter602) + std::vector<int64_t> ::const_iterator _iter622; + for (_iter622 = this->txn_ids.begin(); _iter622 != this->txn_ids.end(); ++_iter622) { - xfer += oprot->writeI64((*_iter602)); + xfer += oprot->writeI64((*_iter622)); } xfer += oprot->writeListEnd(); } @@ -14015,11 +14562,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) { swap(a.txn_ids, b.txn_ids); } -OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other603) { - txn_ids = other603.txn_ids; +OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other623) { + txn_ids = other623.txn_ids; } -OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other604) { - txn_ids = other604.txn_ids; +OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other624) { + txn_ids = other624.txn_ids; return *this; } void OpenTxnsResponse::printTo(std::ostream& out) const { @@ -14101,11 +14648,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.txnid, b.txnid); } -AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other605) { - txnid = other605.txnid; +AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other625) { + txnid = other625.txnid; } -AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other606) { - txnid = other606.txnid; +AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other626) { + txnid = other626.txnid; return *this; } void AbortTxnRequest::printTo(std::ostream& out) const { @@ -14150,14 +14697,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size607; - ::apache::thrift::protocol::TType _etype610; - xfer += iprot->readListBegin(_etype610, _size607); - this->txn_ids.resize(_size607); - uint32_t _i611; - for (_i611 = 0; _i611 < _size607; ++_i611) + uint32_t _size627; + ::apache::thrift::protocol::TType _etype630; + xfer += iprot->readListBegin(_etype630, _size627); + this->txn_ids.resize(_size627); + uint32_t _i631; + for (_i631 = 0; _i631 < _size627; ++_i631) { - xfer += iprot->readI64(this->txn_ids[_i611]); + xfer +=
iprot->readI64(this->txn_ids[_i631]); } xfer += iprot->readListEnd(); } @@ -14188,10 +14735,10 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size())); - std::vector<int64_t> ::const_iterator _iter612; - for (_iter612 = this->txn_ids.begin(); _iter612 != this->txn_ids.end(); ++_iter612) + std::vector<int64_t> ::const_iterator _iter632; + for (_iter632 = this->txn_ids.begin(); _iter632 != this->txn_ids.end(); ++_iter632) { - xfer += oprot->writeI64((*_iter612)); + xfer += oprot->writeI64((*_iter632)); } xfer += oprot->writeListEnd(); } @@ -14207,11 +14754,11 @@ void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) { swap(a.txn_ids, b.txn_ids); } -AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other613) { - txn_ids = other613.txn_ids; +AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other633) { + txn_ids = other633.txn_ids; } -AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other614) { - txn_ids = other614.txn_ids; +AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other634) { + txn_ids = other634.txn_ids; return *this; } void AbortTxnsRequest::printTo(std::ostream& out) const { @@ -14293,11 +14840,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) { swap(a.txnid, b.txnid); } -CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other615) { - txnid = other615.txnid; +CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other635) { + txnid = other635.txnid; } -CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other616) { - txnid = other616.txnid; +CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other636) { + txnid = other636.txnid; return *this; } void CommitTxnRequest::printTo(std::ostream& out) const { @@ -14347,14 +14894,14 @@ uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fullTableNames.clear(); - uint32_t _size617; - ::apache::thrift::protocol::TType _etype620; - xfer += iprot->readListBegin(_etype620, _size617); - this->fullTableNames.resize(_size617); - uint32_t _i621; - for (_i621 = 0; _i621 < _size617; ++_i621) + uint32_t _size637; + ::apache::thrift::protocol::TType _etype640; + xfer += iprot->readListBegin(_etype640, _size637); + this->fullTableNames.resize(_size637); + uint32_t _i641; + for (_i641 = 0; _i641 < _size637; ++_i641) { - xfer += iprot->readString(this->fullTableNames[_i621]); + xfer += iprot->readString(this->fullTableNames[_i641]); } xfer += iprot->readListEnd(); } @@ -14395,10 +14942,10 @@ uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("fullTableNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->fullTableNames.size())); - std::vector<std::string> ::const_iterator _iter622; - for (_iter622 = this->fullTableNames.begin(); _iter622 != this->fullTableNames.end(); ++_iter622) + std::vector<std::string> ::const_iterator _iter642; + for (_iter642 = this->fullTableNames.begin(); _iter642 != this->fullTableNames.end(); ++_iter642) { - xfer += oprot->writeString((*_iter622)); + xfer += oprot->writeString((*_iter642)); } xfer += oprot->writeListEnd(); } @@ -14419,13 +14966,13 @@ void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) {
swap(a.validTxnList, b.validTxnList); } -GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other623) { - fullTableNames = other623.fullTableNames; - validTxnList = other623.validTxnList; +GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other643) { + fullTableNames = other643.fullTableNames; + validTxnList = other643.validTxnList; } -GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other624) { - fullTableNames = other624.fullTableNames; - validTxnList = other624.validTxnList; +GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other644) { + fullTableNames = other644.fullTableNames; + validTxnList = other644.validTxnList; return *this; } void GetValidWriteIdsRequest::printTo(std::ostream& out) const { @@ -14507,14 +15054,14 @@ uint32_t TableValidWriteIds::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->invalidWriteIds.clear(); - uint32_t _size625; - ::apache::thrift::protocol::TType _etype628; - xfer += iprot->readListBegin(_etype628, _size625); - this->invalidWriteIds.resize(_size625); - uint32_t _i629; - for (_i629 = 0; _i629 < _size625; ++_i629) + uint32_t _size645; + ::apache::thrift::protocol::TType _etype648; + xfer += iprot->readListBegin(_etype648, _size645); + this->invalidWriteIds.resize(_size645); + uint32_t _i649; + for (_i649 = 0; _i649 < _size645; ++_i649) { - xfer += iprot->readI64(this->invalidWriteIds[_i629]); + xfer += iprot->readI64(this->invalidWriteIds[_i649]); } xfer += iprot->readListEnd(); } @@ -14575,10 +15122,10 @@ uint32_t TableValidWriteIds::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("invalidWriteIds", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->invalidWriteIds.size())); - std::vector<int64_t> ::const_iterator _iter630; - for (_iter630 = this->invalidWriteIds.begin(); _iter630 != this->invalidWriteIds.end(); ++_iter630) + std::vector<int64_t> ::const_iterator _iter650; + for (_iter650 = this->invalidWriteIds.begin(); _iter650 != this->invalidWriteIds.end(); ++_iter650) { - xfer += oprot->writeI64((*_iter630)); + xfer += oprot->writeI64((*_iter650)); } xfer += oprot->writeListEnd(); } @@ -14608,21 +15155,21 @@ void swap(TableValidWriteIds &a, TableValidWriteIds &b) { swap(a.__isset, b.__isset); } -TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other631) { - fullTableName = other631.fullTableName; - writeIdHighWaterMark = other631.writeIdHighWaterMark; - invalidWriteIds = other631.invalidWriteIds; - minOpenWriteId = other631.minOpenWriteId; - abortedBits = other631.abortedBits; - __isset = other631.__isset; -} -TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other632) { - fullTableName = other632.fullTableName; - writeIdHighWaterMark = other632.writeIdHighWaterMark; - invalidWriteIds = other632.invalidWriteIds; - minOpenWriteId = other632.minOpenWriteId; - abortedBits = other632.abortedBits; - __isset = other632.__isset; +TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other651) { + fullTableName = other651.fullTableName; + writeIdHighWaterMark = other651.writeIdHighWaterMark; + invalidWriteIds = other651.invalidWriteIds; + minOpenWriteId = other651.minOpenWriteId; + abortedBits = other651.abortedBits; + __isset = other651.__isset; +} +TableValidWriteIds& TableValidWriteIds::operator=(const
TableValidWriteIds& other652) { + fullTableName = other652.fullTableName; + writeIdHighWaterMark = other652.writeIdHighWaterMark; + invalidWriteIds = other652.invalidWriteIds; + minOpenWriteId = other652.minOpenWriteId; + abortedBits = other652.abortedBits; + __isset = other652.__isset; return *this; } void TableValidWriteIds::printTo(std::ostream& out) const { @@ -14671,14 +15218,14 @@ uint32_t GetValidWriteIdsResponse::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblValidWriteIds.clear(); - uint32_t _size633; - ::apache::thrift::protocol::TType _etype636; - xfer += iprot->readListBegin(_etype636, _size633); - this->tblValidWriteIds.resize(_size633); - uint32_t _i637; - for (_i637 = 0; _i637 < _size633; ++_i637) + uint32_t _size653; + ::apache::thrift::protocol::TType _etype656; + xfer += iprot->readListBegin(_etype656, _size653); + this->tblValidWriteIds.resize(_size653); + uint32_t _i657; + for (_i657 = 0; _i657 < _size653; ++_i657) { - xfer += this->tblValidWriteIds[_i637].read(iprot); + xfer += this->tblValidWriteIds[_i657].read(iprot); } xfer += iprot->readListEnd(); } @@ -14709,10 +15256,10 @@ uint32_t GetValidWriteIdsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("tblValidWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tblValidWriteIds.size())); - std::vector<TableValidWriteIds> ::const_iterator _iter638; - for (_iter638 = this->tblValidWriteIds.begin(); _iter638 != this->tblValidWriteIds.end(); ++_iter638) + std::vector<TableValidWriteIds> ::const_iterator _iter658; + for (_iter658 = this->tblValidWriteIds.begin(); _iter658 != this->tblValidWriteIds.end(); ++_iter658) { - xfer += (*_iter638).write(oprot); + xfer += (*_iter658).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14728,11 +15275,11 @@ void swap(GetValidWriteIdsResponse &a, GetValidWriteIdsResponse &b) { swap(a.tblValidWriteIds, b.tblValidWriteIds); } -GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other639) { - tblValidWriteIds = other639.tblValidWriteIds; +GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other659) { + tblValidWriteIds = other659.tblValidWriteIds; } -GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other640) { - tblValidWriteIds = other640.tblValidWriteIds; +GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other660) { + tblValidWriteIds = other660.tblValidWriteIds; return *this; } void GetValidWriteIdsResponse::printTo(std::ostream& out) const { @@ -14787,14 +15334,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnIds.clear(); - uint32_t _size641; - ::apache::thrift::protocol::TType _etype644; - xfer += iprot->readListBegin(_etype644, _size641); - this->txnIds.resize(_size641); - uint32_t _i645; - for (_i645 = 0; _i645 < _size641; ++_i645) + uint32_t _size661; + ::apache::thrift::protocol::TType _etype664; + xfer += iprot->readListBegin(_etype664, _size661); + this->txnIds.resize(_size661); + uint32_t _i665; + for (_i665 = 0; _i665 < _size661; ++_i665) { - xfer += iprot->readI64(this->txnIds[_i645]); + xfer += iprot->readI64(this->txnIds[_i665]); } xfer += iprot->readListEnd(); } @@ -14845,10 +15392,10 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc xfer +=
oprot->writeFieldBegin("txnIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txnIds.size())); - std::vector ::const_iterator _iter646; - for (_iter646 = this->txnIds.begin(); _iter646 != this->txnIds.end(); ++_iter646) + std::vector ::const_iterator _iter666; + for (_iter666 = this->txnIds.begin(); _iter666 != this->txnIds.end(); ++_iter666) { - xfer += oprot->writeI64((*_iter646)); + xfer += oprot->writeI64((*_iter666)); } xfer += oprot->writeListEnd(); } @@ -14874,15 +15421,15 @@ void swap(AllocateTableWriteIdsRequest &a, AllocateTableWriteIdsRequest &b) { swap(a.tableName, b.tableName); } -AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other647) { - txnIds = other647.txnIds; - dbName = other647.dbName; - tableName = other647.tableName; +AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other667) { + txnIds = other667.txnIds; + dbName = other667.dbName; + tableName = other667.tableName; } -AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other648) { - txnIds = other648.txnIds; - dbName = other648.dbName; - tableName = other648.tableName; +AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other668) { + txnIds = other668.txnIds; + dbName = other668.dbName; + tableName = other668.tableName; return *this; } void AllocateTableWriteIdsRequest::printTo(std::ostream& out) const { @@ -14986,13 +15533,13 @@ void swap(TxnToWriteId &a, TxnToWriteId &b) { swap(a.writeId, b.writeId); } -TxnToWriteId::TxnToWriteId(const TxnToWriteId& other649) { - txnId = other649.txnId; - writeId = other649.writeId; +TxnToWriteId::TxnToWriteId(const TxnToWriteId& other669) { + txnId = other669.txnId; + writeId = other669.writeId; } -TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other650) { - txnId = other650.txnId; - writeId = other650.writeId; +TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other670) { + txnId = other670.txnId; + writeId = other670.writeId; return *this; } void TxnToWriteId::printTo(std::ostream& out) const { @@ -15038,14 +15585,14 @@ uint32_t AllocateTableWriteIdsResponse::read(::apache::thrift::protocol::TProtoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnToWriteIds.clear(); - uint32_t _size651; - ::apache::thrift::protocol::TType _etype654; - xfer += iprot->readListBegin(_etype654, _size651); - this->txnToWriteIds.resize(_size651); - uint32_t _i655; - for (_i655 = 0; _i655 < _size651; ++_i655) + uint32_t _size671; + ::apache::thrift::protocol::TType _etype674; + xfer += iprot->readListBegin(_etype674, _size671); + this->txnToWriteIds.resize(_size671); + uint32_t _i675; + for (_i675 = 0; _i675 < _size671; ++_i675) { - xfer += this->txnToWriteIds[_i655].read(iprot); + xfer += this->txnToWriteIds[_i675].read(iprot); } xfer += iprot->readListEnd(); } @@ -15076,10 +15623,10 @@ uint32_t AllocateTableWriteIdsResponse::write(::apache::thrift::protocol::TProto xfer += oprot->writeFieldBegin("txnToWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->txnToWriteIds.size())); - std::vector ::const_iterator _iter656; - for (_iter656 = this->txnToWriteIds.begin(); _iter656 != this->txnToWriteIds.end(); ++_iter656) + std::vector ::const_iterator _iter676; + for (_iter676 = this->txnToWriteIds.begin(); 
_iter676 != this->txnToWriteIds.end(); ++_iter676) { - xfer += (*_iter656).write(oprot); + xfer += (*_iter676).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15095,11 +15642,11 @@ void swap(AllocateTableWriteIdsResponse &a, AllocateTableWriteIdsResponse &b) { swap(a.txnToWriteIds, b.txnToWriteIds); } -AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other657) { - txnToWriteIds = other657.txnToWriteIds; +AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other677) { + txnToWriteIds = other677.txnToWriteIds; } -AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other658) { - txnToWriteIds = other658.txnToWriteIds; +AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other678) { + txnToWriteIds = other678.txnToWriteIds; return *this; } void AllocateTableWriteIdsResponse::printTo(std::ostream& out) const { @@ -15177,9 +15724,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast659; - xfer += iprot->readI32(ecast659); - this->type = (LockType::type)ecast659; + int32_t ecast679; + xfer += iprot->readI32(ecast679); + this->type = (LockType::type)ecast679; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -15187,9 +15734,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast660; - xfer += iprot->readI32(ecast660); - this->level = (LockLevel::type)ecast660; + int32_t ecast680; + xfer += iprot->readI32(ecast680); + this->level = (LockLevel::type)ecast680; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -15221,9 +15768,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast661; - xfer += iprot->readI32(ecast661); - this->operationType = (DataOperationType::type)ecast661; + int32_t ecast681; + xfer += iprot->readI32(ecast681); + this->operationType = (DataOperationType::type)ecast681; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -15323,27 +15870,27 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other662) { - type = other662.type; - level = other662.level; - dbname = other662.dbname; - tablename = other662.tablename; - partitionname = other662.partitionname; - operationType = other662.operationType; - isAcid = other662.isAcid; - isDynamicPartitionWrite = other662.isDynamicPartitionWrite; - __isset = other662.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other663) { - type = other663.type; - level = other663.level; - dbname = other663.dbname; - tablename = other663.tablename; - partitionname = other663.partitionname; - operationType = other663.operationType; - isAcid = other663.isAcid; - isDynamicPartitionWrite = other663.isDynamicPartitionWrite; - __isset = other663.__isset; +LockComponent::LockComponent(const LockComponent& other682) { + type = other682.type; + level = other682.level; + dbname = other682.dbname; + tablename = other682.tablename; + partitionname = other682.partitionname; + operationType = other682.operationType; + isAcid = other682.isAcid; + isDynamicPartitionWrite = 
other682.isDynamicPartitionWrite; + __isset = other682.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other683) { + type = other683.type; + level = other683.level; + dbname = other683.dbname; + tablename = other683.tablename; + partitionname = other683.partitionname; + operationType = other683.operationType; + isAcid = other683.isAcid; + isDynamicPartitionWrite = other683.isDynamicPartitionWrite; + __isset = other683.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -15415,14 +15962,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size664; - ::apache::thrift::protocol::TType _etype667; - xfer += iprot->readListBegin(_etype667, _size664); - this->component.resize(_size664); - uint32_t _i668; - for (_i668 = 0; _i668 < _size664; ++_i668) + uint32_t _size684; + ::apache::thrift::protocol::TType _etype687; + xfer += iprot->readListBegin(_etype687, _size684); + this->component.resize(_size684); + uint32_t _i688; + for (_i688 = 0; _i688 < _size684; ++_i688) { - xfer += this->component[_i668].read(iprot); + xfer += this->component[_i688].read(iprot); } xfer += iprot->readListEnd(); } @@ -15489,10 +16036,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->component.size())); - std::vector<LockComponent> ::const_iterator _iter669; - for (_iter669 = this->component.begin(); _iter669 != this->component.end(); ++_iter669) + std::vector<LockComponent> ::const_iterator _iter689; + for (_iter689 = this->component.begin(); _iter689 != this->component.end(); ++_iter689) { - xfer += (*_iter669).write(oprot); + xfer += (*_iter689).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15531,21 +16078,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other670) { - component = other670.component; - txnid = other670.txnid; - user = other670.user; - hostname = other670.hostname; - agentInfo = other670.agentInfo; - __isset = other670.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other671) { - component = other671.component; - txnid = other671.txnid; - user = other671.user; - hostname = other671.hostname; - agentInfo = other671.agentInfo; - __isset = other671.__isset; +LockRequest::LockRequest(const LockRequest& other690) { + component = other690.component; + txnid = other690.txnid; + user = other690.user; + hostname = other690.hostname; + agentInfo = other690.agentInfo; + __isset = other690.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other691) { + component = other691.component; + txnid = other691.txnid; + user = other691.user; + hostname = other691.hostname; + agentInfo = other691.agentInfo; + __isset = other691.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -15605,9 +16152,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast672; - xfer += iprot->readI32(ecast672); - this->state = (LockState::type)ecast672; + int32_t ecast692; + xfer += iprot->readI32(ecast692); + this->state = (LockState::type)ecast692; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -15653,13 +16200,13 @@ void
swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other673) { - lockid = other673.lockid; - state = other673.state; +LockResponse::LockResponse(const LockResponse& other693) { + lockid = other693.lockid; + state = other693.state; } -LockResponse& LockResponse::operator=(const LockResponse& other674) { - lockid = other674.lockid; - state = other674.state; +LockResponse& LockResponse::operator=(const LockResponse& other694) { + lockid = other694.lockid; + state = other694.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -15781,17 +16328,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other675) { - lockid = other675.lockid; - txnid = other675.txnid; - elapsed_ms = other675.elapsed_ms; - __isset = other675.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other695) { + lockid = other695.lockid; + txnid = other695.txnid; + elapsed_ms = other695.elapsed_ms; + __isset = other695.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other676) { - lockid = other676.lockid; - txnid = other676.txnid; - elapsed_ms = other676.elapsed_ms; - __isset = other676.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other696) { + lockid = other696.lockid; + txnid = other696.txnid; + elapsed_ms = other696.elapsed_ms; + __isset = other696.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -15875,11 +16422,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other677) { - lockid = other677.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other697) { + lockid = other697.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other678) { - lockid = other678.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other698) { + lockid = other698.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -16018,19 +16565,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other679) { - dbname = other679.dbname; - tablename = other679.tablename; - partname = other679.partname; - isExtended = other679.isExtended; - __isset = other679.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other699) { + dbname = other699.dbname; + tablename = other699.tablename; + partname = other699.partname; + isExtended = other699.isExtended; + __isset = other699.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other680) { - dbname = other680.dbname; - tablename = other680.tablename; - partname = other680.partname; - isExtended = other680.isExtended; - __isset = other680.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other700) { + dbname = other700.dbname; + tablename = other700.tablename; + partname = other700.partname; + isExtended = other700.isExtended; + __isset = other700.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -16183,9 +16730,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast681; - xfer += iprot->readI32(ecast681); - this->state = 
(LockState::type)ecast681; + int32_t ecast701; + xfer += iprot->readI32(ecast701); + this->state = (LockState::type)ecast701; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -16193,9 +16740,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast682; - xfer += iprot->readI32(ecast682); - this->type = (LockType::type)ecast682; + int32_t ecast702; + xfer += iprot->readI32(ecast702); + this->type = (LockType::type)ecast702; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16411,43 +16958,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other683) { - lockid = other683.lockid; - dbname = other683.dbname; - tablename = other683.tablename; - partname = other683.partname; - state = other683.state; - type = other683.type; - txnid = other683.txnid; - lastheartbeat = other683.lastheartbeat; - acquiredat = other683.acquiredat; - user = other683.user; - hostname = other683.hostname; - heartbeatCount = other683.heartbeatCount; - agentInfo = other683.agentInfo; - blockedByExtId = other683.blockedByExtId; - blockedByIntId = other683.blockedByIntId; - lockIdInternal = other683.lockIdInternal; - __isset = other683.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other684) { - lockid = other684.lockid; - dbname = other684.dbname; - tablename = other684.tablename; - partname = other684.partname; - state = other684.state; - type = other684.type; - txnid = other684.txnid; - lastheartbeat = other684.lastheartbeat; - acquiredat = other684.acquiredat; - user = other684.user; - hostname = other684.hostname; - heartbeatCount = other684.heartbeatCount; - agentInfo = other684.agentInfo; - blockedByExtId = other684.blockedByExtId; - blockedByIntId = other684.blockedByIntId; - lockIdInternal = other684.lockIdInternal; - __isset = other684.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other703) { + lockid = other703.lockid; + dbname = other703.dbname; + tablename = other703.tablename; + partname = other703.partname; + state = other703.state; + type = other703.type; + txnid = other703.txnid; + lastheartbeat = other703.lastheartbeat; + acquiredat = other703.acquiredat; + user = other703.user; + hostname = other703.hostname; + heartbeatCount = other703.heartbeatCount; + agentInfo = other703.agentInfo; + blockedByExtId = other703.blockedByExtId; + blockedByIntId = other703.blockedByIntId; + lockIdInternal = other703.lockIdInternal; + __isset = other703.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other704) { + lockid = other704.lockid; + dbname = other704.dbname; + tablename = other704.tablename; + partname = other704.partname; + state = other704.state; + type = other704.type; + txnid = other704.txnid; + lastheartbeat = other704.lastheartbeat; + acquiredat = other704.acquiredat; + user = other704.user; + hostname = other704.hostname; + heartbeatCount = other704.heartbeatCount; + agentInfo = other704.agentInfo; + blockedByExtId = other704.blockedByExtId; + blockedByIntId = other704.blockedByIntId; + lockIdInternal = other704.lockIdInternal; + __isset = other704.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -16506,14 +17053,14 @@ uint32_t 
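
[Editor's sketch — illustrative, not part of the patch.] The ecastNNN temporaries renumbered above (ecast679/680/681, ecast701/702, ...) all come from one pattern: Thrift enums travel on the wire as plain i32, so the generated reader pulls an int32_t and casts it back to the enum type. A minimal sketch, with illustrative enum values:

#include <cstdint>
#include <thrift/protocol/TProtocol.h>

namespace sketch {
// Stand-in for generated enums like LockState; the values here are
// illustrative only, not taken from the real IDL.
struct LockState { enum type { ACQUIRED = 1, WAITING = 2 }; };
}

uint32_t readLockState(::apache::thrift::protocol::TProtocol* iprot,
                       sketch::LockState::type& state) {
  uint32_t xfer = 0;
  int32_t ecast;                           // the renumbered temporary
  xfer += iprot->readI32(ecast);           // enums are serialized as i32
  state = (sketch::LockState::type)ecast;  // C-style cast, as generated
  return xfer;
}
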
ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size685; - ::apache::thrift::protocol::TType _etype688; - xfer += iprot->readListBegin(_etype688, _size685); - this->locks.resize(_size685); - uint32_t _i689; - for (_i689 = 0; _i689 < _size685; ++_i689) + uint32_t _size705; + ::apache::thrift::protocol::TType _etype708; + xfer += iprot->readListBegin(_etype708, _size705); + this->locks.resize(_size705); + uint32_t _i709; + for (_i709 = 0; _i709 < _size705; ++_i709) { - xfer += this->locks[_i689].read(iprot); + xfer += this->locks[_i709].read(iprot); } xfer += iprot->readListEnd(); } @@ -16542,10 +17089,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter690; - for (_iter690 = this->locks.begin(); _iter690 != this->locks.end(); ++_iter690) + std::vector ::const_iterator _iter710; + for (_iter710 = this->locks.begin(); _iter710 != this->locks.end(); ++_iter710) { - xfer += (*_iter690).write(oprot); + xfer += (*_iter710).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16562,13 +17109,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other691) { - locks = other691.locks; - __isset = other691.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other711) { + locks = other711.locks; + __isset = other711.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other692) { - locks = other692.locks; - __isset = other692.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other712) { + locks = other712.locks; + __isset = other712.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -16669,15 +17216,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other693) { - lockid = other693.lockid; - txnid = other693.txnid; - __isset = other693.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other713) { + lockid = other713.lockid; + txnid = other713.txnid; + __isset = other713.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other694) { - lockid = other694.lockid; - txnid = other694.txnid; - __isset = other694.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other714) { + lockid = other714.lockid; + txnid = other714.txnid; + __isset = other714.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -16780,13 +17327,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other695) { - min = other695.min; - max = other695.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other715) { + min = other715.min; + max = other715.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other696) { - min = other696.min; - max = other696.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const 
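
[Editor's sketch — illustrative, not part of the patch.] The _sizeNNN/_etypeNNN/_iNNN renumberings in ShowLocksResponse::read above all trace to the generated T_LIST deserializer shape: read the element type and count, resize the vector, then deserialize each element in place. Roughly:

#include <cstdint>
#include <vector>
#include <thrift/protocol/TProtocol.h>

// Hypothetical element standing in for ShowLocksResponseElement; skip() here
// stands in for the generated field-by-field reader.
struct ElementSketch {
  uint32_t read(::apache::thrift::protocol::TProtocol* iprot) {
    return iprot->skip(::apache::thrift::protocol::T_STRUCT);
  }
};

uint32_t readLockList(::apache::thrift::protocol::TProtocol* iprot,
                      std::vector<ElementSketch>& locks) {
  uint32_t xfer = 0;
  locks.clear();
  uint32_t size;                              // _sizeNNN in the patch
  ::apache::thrift::protocol::TType etype;    // _etypeNNN
  xfer += iprot->readListBegin(etype, size);  // element type + count from the wire
  locks.resize(size);                         // pre-size, then read in place
  for (uint32_t i = 0; i < size; ++i) {       // _iNNN
    xfer += locks[i].read(iprot);
  }
  xfer += iprot->readListEnd();
  return xfer;  // running byte count, as in every generated read()/write()
}
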
HeartbeatTxnRangeRequest& other716) { + min = other716.min; + max = other716.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -16837,15 +17384,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size697; - ::apache::thrift::protocol::TType _etype700; - xfer += iprot->readSetBegin(_etype700, _size697); - uint32_t _i701; - for (_i701 = 0; _i701 < _size697; ++_i701) + uint32_t _size717; + ::apache::thrift::protocol::TType _etype720; + xfer += iprot->readSetBegin(_etype720, _size717); + uint32_t _i721; + for (_i721 = 0; _i721 < _size717; ++_i721) { - int64_t _elem702; - xfer += iprot->readI64(_elem702); - this->aborted.insert(_elem702); + int64_t _elem722; + xfer += iprot->readI64(_elem722); + this->aborted.insert(_elem722); } xfer += iprot->readSetEnd(); } @@ -16858,15 +17405,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size703; - ::apache::thrift::protocol::TType _etype706; - xfer += iprot->readSetBegin(_etype706, _size703); - uint32_t _i707; - for (_i707 = 0; _i707 < _size703; ++_i707) + uint32_t _size723; + ::apache::thrift::protocol::TType _etype726; + xfer += iprot->readSetBegin(_etype726, _size723); + uint32_t _i727; + for (_i727 = 0; _i727 < _size723; ++_i727) { - int64_t _elem708; - xfer += iprot->readI64(_elem708); - this->nosuch.insert(_elem708); + int64_t _elem728; + xfer += iprot->readI64(_elem728); + this->nosuch.insert(_elem728); } xfer += iprot->readSetEnd(); } @@ -16899,10 +17446,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator _iter709; - for (_iter709 = this->aborted.begin(); _iter709 != this->aborted.end(); ++_iter709) + std::set ::const_iterator _iter729; + for (_iter729 = this->aborted.begin(); _iter729 != this->aborted.end(); ++_iter729) { - xfer += oprot->writeI64((*_iter709)); + xfer += oprot->writeI64((*_iter729)); } xfer += oprot->writeSetEnd(); } @@ -16911,10 +17458,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter710; - for (_iter710 = this->nosuch.begin(); _iter710 != this->nosuch.end(); ++_iter710) + std::set ::const_iterator _iter730; + for (_iter730 = this->nosuch.begin(); _iter730 != this->nosuch.end(); ++_iter730) { - xfer += oprot->writeI64((*_iter710)); + xfer += oprot->writeI64((*_iter730)); } xfer += oprot->writeSetEnd(); } @@ -16931,13 +17478,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other711) { - aborted = other711.aborted; - nosuch = other711.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other731) { + aborted = other731.aborted; + nosuch = other731.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other712) { - aborted = 
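
[Editor's sketch — illustrative, not part of the patch.] The HeartbeatTxnRangeResponse hunks above show the T_SET variant of the same deserializer: unlike the vector case there is no resize; each element is read into a local (_elemNNN) and inserted. Roughly:

#include <cstdint>
#include <set>
#include <thrift/protocol/TProtocol.h>

uint32_t readTxnIdSet(::apache::thrift::protocol::TProtocol* iprot,
                      std::set<int64_t>& aborted) {
  uint32_t xfer = 0;
  aborted.clear();
  uint32_t size;
  ::apache::thrift::protocol::TType etype;
  xfer += iprot->readSetBegin(etype, size);
  for (uint32_t i = 0; i < size; ++i) {
    int64_t elem;                // _elemNNN in the patch
    xfer += iprot->readI64(elem);
    aborted.insert(elem);        // sets are filled by insert, not resize
  }
  xfer += iprot->readSetEnd();
  return xfer;
}
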
other712.aborted; - nosuch = other712.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other732) { + aborted = other732.aborted; + nosuch = other732.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -17030,9 +17577,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast713; - xfer += iprot->readI32(ecast713); - this->type = (CompactionType::type)ecast713; + int32_t ecast733; + xfer += iprot->readI32(ecast733); + this->type = (CompactionType::type)ecast733; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -17050,17 +17597,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size714; - ::apache::thrift::protocol::TType _ktype715; - ::apache::thrift::protocol::TType _vtype716; - xfer += iprot->readMapBegin(_ktype715, _vtype716, _size714); - uint32_t _i718; - for (_i718 = 0; _i718 < _size714; ++_i718) + uint32_t _size734; + ::apache::thrift::protocol::TType _ktype735; + ::apache::thrift::protocol::TType _vtype736; + xfer += iprot->readMapBegin(_ktype735, _vtype736, _size734); + uint32_t _i738; + for (_i738 = 0; _i738 < _size734; ++_i738) { - std::string _key719; - xfer += iprot->readString(_key719); - std::string& _val720 = this->properties[_key719]; - xfer += iprot->readString(_val720); + std::string _key739; + xfer += iprot->readString(_key739); + std::string& _val740 = this->properties[_key739]; + xfer += iprot->readString(_val740); } xfer += iprot->readMapEnd(); } @@ -17118,11 +17665,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter721; - for (_iter721 = this->properties.begin(); _iter721 != this->properties.end(); ++_iter721) + std::map ::const_iterator _iter741; + for (_iter741 = this->properties.begin(); _iter741 != this->properties.end(); ++_iter741) { - xfer += oprot->writeString(_iter721->first); - xfer += oprot->writeString(_iter721->second); + xfer += oprot->writeString(_iter741->first); + xfer += oprot->writeString(_iter741->second); } xfer += oprot->writeMapEnd(); } @@ -17144,23 +17691,23 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other722) { - dbname = other722.dbname; - tablename = other722.tablename; - partitionname = other722.partitionname; - type = other722.type; - runas = other722.runas; - properties = other722.properties; - __isset = other722.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other723) { - dbname = other723.dbname; - tablename = other723.tablename; - partitionname = other723.partitionname; - type = other723.type; - runas = other723.runas; - properties = other723.properties; - __isset = other723.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other742) { + dbname = other742.dbname; + tablename = other742.tablename; + partitionname = other742.partitionname; + type = other742.type; + runas = other742.runas; + properties = other742.properties; + __isset = 
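
[Editor's sketch — illustrative, not part of the patch.] CompactionRequest::properties above shows the T_MAP shape, which renumbers several temporaries per map (_sizeNNN, _ktypeNNN, _vtypeNNN, _iNNN) plus a key/value pair (_keyNNN, _valNNN). The generated reader inserts the key first and then deserializes the value in place through a reference:

#include <cstdint>
#include <map>
#include <string>
#include <thrift/protocol/TProtocol.h>

uint32_t readProperties(::apache::thrift::protocol::TProtocol* iprot,
                        std::map<std::string, std::string>& properties) {
  uint32_t xfer = 0;
  properties.clear();
  uint32_t size;
  ::apache::thrift::protocol::TType ktype, vtype;
  xfer += iprot->readMapBegin(ktype, vtype, size);
  for (uint32_t i = 0; i < size; ++i) {
    std::string key;                     // _keyNNN in the patch
    xfer += iprot->readString(key);
    std::string& val = properties[key];  // _valNNN: insert, then fill in place
    xfer += iprot->readString(val);
  }
  xfer += iprot->readMapEnd();
  return xfer;
}
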
other742.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other743) { + dbname = other743.dbname; + tablename = other743.tablename; + partitionname = other743.partitionname; + type = other743.type; + runas = other743.runas; + properties = other743.properties; + __isset = other743.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -17287,15 +17834,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) { swap(a.accepted, b.accepted); } -CompactionResponse::CompactionResponse(const CompactionResponse& other724) { - id = other724.id; - state = other724.state; - accepted = other724.accepted; +CompactionResponse::CompactionResponse(const CompactionResponse& other744) { + id = other744.id; + state = other744.state; + accepted = other744.accepted; } -CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other725) { - id = other725.id; - state = other725.state; - accepted = other725.accepted; +CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other745) { + id = other745.id; + state = other745.state; + accepted = other745.accepted; return *this; } void CompactionResponse::printTo(std::ostream& out) const { @@ -17356,11 +17903,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other726) { - (void) other726; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other746) { + (void) other746; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other727) { - (void) other727; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other747) { + (void) other747; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -17486,9 +18033,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast728; - xfer += iprot->readI32(ecast728); - this->type = (CompactionType::type)ecast728; + int32_t ecast748; + xfer += iprot->readI32(ecast748); + this->type = (CompactionType::type)ecast748; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -17675,37 +18222,37 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other729) { - dbname = other729.dbname; - tablename = other729.tablename; - partitionname = other729.partitionname; - type = other729.type; - state = other729.state; - workerid = other729.workerid; - start = other729.start; - runAs = other729.runAs; - hightestTxnId = other729.hightestTxnId; - metaInfo = other729.metaInfo; - endTime = other729.endTime; - hadoopJobId = other729.hadoopJobId; - id = other729.id; - __isset = other729.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other730) { - dbname = other730.dbname; - tablename = other730.tablename; - partitionname = other730.partitionname; - type = other730.type; - state = other730.state; - workerid = other730.workerid; - start = other730.start; - runAs = other730.runAs; - hightestTxnId = other730.hightestTxnId; - metaInfo = other730.metaInfo; - endTime = other730.endTime; - hadoopJobId = other730.hadoopJobId; - id = other730.id; - __isset = other730.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const 
ShowCompactResponseElement& other749) { + dbname = other749.dbname; + tablename = other749.tablename; + partitionname = other749.partitionname; + type = other749.type; + state = other749.state; + workerid = other749.workerid; + start = other749.start; + runAs = other749.runAs; + hightestTxnId = other749.hightestTxnId; + metaInfo = other749.metaInfo; + endTime = other749.endTime; + hadoopJobId = other749.hadoopJobId; + id = other749.id; + __isset = other749.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other750) { + dbname = other750.dbname; + tablename = other750.tablename; + partitionname = other750.partitionname; + type = other750.type; + state = other750.state; + workerid = other750.workerid; + start = other750.start; + runAs = other750.runAs; + hightestTxnId = other750.hightestTxnId; + metaInfo = other750.metaInfo; + endTime = other750.endTime; + hadoopJobId = other750.hadoopJobId; + id = other750.id; + __isset = other750.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -17762,14 +18309,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size731; - ::apache::thrift::protocol::TType _etype734; - xfer += iprot->readListBegin(_etype734, _size731); - this->compacts.resize(_size731); - uint32_t _i735; - for (_i735 = 0; _i735 < _size731; ++_i735) + uint32_t _size751; + ::apache::thrift::protocol::TType _etype754; + xfer += iprot->readListBegin(_etype754, _size751); + this->compacts.resize(_size751); + uint32_t _i755; + for (_i755 = 0; _i755 < _size751; ++_i755) { - xfer += this->compacts[_i735].read(iprot); + xfer += this->compacts[_i755].read(iprot); } xfer += iprot->readListEnd(); } @@ -17800,10 +18347,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter736; - for (_iter736 = this->compacts.begin(); _iter736 != this->compacts.end(); ++_iter736) + std::vector ::const_iterator _iter756; + for (_iter756 = this->compacts.begin(); _iter756 != this->compacts.end(); ++_iter756) { - xfer += (*_iter736).write(oprot); + xfer += (*_iter756).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17819,11 +18366,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other737) { - compacts = other737.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other757) { + compacts = other757.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other738) { - compacts = other738.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other758) { + compacts = other758.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -17925,14 +18472,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size739; - ::apache::thrift::protocol::TType _etype742; - xfer += iprot->readListBegin(_etype742, _size739); - this->partitionnames.resize(_size739); - uint32_t _i743; - 
for (_i743 = 0; _i743 < _size739; ++_i743) + uint32_t _size759; + ::apache::thrift::protocol::TType _etype762; + xfer += iprot->readListBegin(_etype762, _size759); + this->partitionnames.resize(_size759); + uint32_t _i763; + for (_i763 = 0; _i763 < _size759; ++_i763) { - xfer += iprot->readString(this->partitionnames[_i743]); + xfer += iprot->readString(this->partitionnames[_i763]); } xfer += iprot->readListEnd(); } @@ -17943,9 +18490,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast744; - xfer += iprot->readI32(ecast744); - this->operationType = (DataOperationType::type)ecast744; + int32_t ecast764; + xfer += iprot->readI32(ecast764); + this->operationType = (DataOperationType::type)ecast764; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -17997,10 +18544,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter745; - for (_iter745 = this->partitionnames.begin(); _iter745 != this->partitionnames.end(); ++_iter745) + std::vector ::const_iterator _iter765; + for (_iter765 = this->partitionnames.begin(); _iter765 != this->partitionnames.end(); ++_iter765) { - xfer += oprot->writeString((*_iter745)); + xfer += oprot->writeString((*_iter765)); } xfer += oprot->writeListEnd(); } @@ -18027,23 +18574,23 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other746) { - txnid = other746.txnid; - writeid = other746.writeid; - dbname = other746.dbname; - tablename = other746.tablename; - partitionnames = other746.partitionnames; - operationType = other746.operationType; - __isset = other746.__isset; -} -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other747) { - txnid = other747.txnid; - writeid = other747.writeid; - dbname = other747.dbname; - tablename = other747.tablename; - partitionnames = other747.partitionnames; - operationType = other747.operationType; - __isset = other747.__isset; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other766) { + txnid = other766.txnid; + writeid = other766.writeid; + dbname = other766.dbname; + tablename = other766.tablename; + partitionnames = other766.partitionnames; + operationType = other766.operationType; + __isset = other766.__isset; +} +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other767) { + txnid = other767.txnid; + writeid = other767.writeid; + dbname = other767.dbname; + tablename = other767.tablename; + partitionnames = other767.partitionnames; + operationType = other767.operationType; + __isset = other767.__isset; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -18226,23 +18773,23 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) { swap(a.__isset, b.__isset); } -BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other748) { - isnull = other748.isnull; - time = other748.time; - txnid = other748.txnid; - dbname = other748.dbname; - tablename = other748.tablename; - partitionname = other748.partitionname; - __isset = other748.__isset; -} -BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other749) { - 
isnull = other749.isnull; - time = other749.time; - txnid = other749.txnid; - dbname = other749.dbname; - tablename = other749.tablename; - partitionname = other749.partitionname; - __isset = other749.__isset; +BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other768) { + isnull = other768.isnull; + time = other768.time; + txnid = other768.txnid; + dbname = other768.dbname; + tablename = other768.tablename; + partitionname = other768.partitionname; + __isset = other768.__isset; +} +BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other769) { + isnull = other769.isnull; + time = other769.time; + txnid = other769.txnid; + dbname = other769.dbname; + tablename = other769.tablename; + partitionname = other769.partitionname; + __isset = other769.__isset; return *this; } void BasicTxnInfo::printTo(std::ostream& out) const { @@ -18323,15 +18870,15 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size750; - ::apache::thrift::protocol::TType _etype753; - xfer += iprot->readSetBegin(_etype753, _size750); - uint32_t _i754; - for (_i754 = 0; _i754 < _size750; ++_i754) + uint32_t _size770; + ::apache::thrift::protocol::TType _etype773; + xfer += iprot->readSetBegin(_etype773, _size770); + uint32_t _i774; + for (_i774 = 0; _i774 < _size770; ++_i774) { - std::string _elem755; - xfer += iprot->readString(_elem755); - this->tablesUsed.insert(_elem755); + std::string _elem775; + xfer += iprot->readString(_elem775); + this->tablesUsed.insert(_elem775); } xfer += iprot->readSetEnd(); } @@ -18382,10 +18929,10 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 3); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter756; - for (_iter756 = this->tablesUsed.begin(); _iter756 != this->tablesUsed.end(); ++_iter756) + std::set ::const_iterator _iter776; + for (_iter776 = this->tablesUsed.begin(); _iter776 != this->tablesUsed.end(); ++_iter776) { - xfer += oprot->writeString((*_iter756)); + xfer += oprot->writeString((*_iter776)); } xfer += oprot->writeSetEnd(); } @@ -18410,19 +18957,19 @@ void swap(CreationMetadata &a, CreationMetadata &b) { swap(a.__isset, b.__isset); } -CreationMetadata::CreationMetadata(const CreationMetadata& other757) { - dbName = other757.dbName; - tblName = other757.tblName; - tablesUsed = other757.tablesUsed; - validTxnList = other757.validTxnList; - __isset = other757.__isset; +CreationMetadata::CreationMetadata(const CreationMetadata& other777) { + dbName = other777.dbName; + tblName = other777.tblName; + tablesUsed = other777.tablesUsed; + validTxnList = other777.validTxnList; + __isset = other777.__isset; } -CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other758) { - dbName = other758.dbName; - tblName = other758.tblName; - tablesUsed = other758.tablesUsed; - validTxnList = other758.validTxnList; - __isset = other758.__isset; +CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other778) { + dbName = other778.dbName; + tblName = other778.tblName; + tablesUsed = other778.tablesUsed; + validTxnList = other778.validTxnList; + __isset = other778.__isset; return *this; } void CreationMetadata::printTo(std::ostream& out) const { @@ -18527,15 +19074,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { 
swap(a.__isset, b.__isset); } -NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other759) { - lastEvent = other759.lastEvent; - maxEvents = other759.maxEvents; - __isset = other759.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other779) { + lastEvent = other779.lastEvent; + maxEvents = other779.maxEvents; + __isset = other779.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other760) { - lastEvent = other760.lastEvent; - maxEvents = other760.maxEvents; - __isset = other760.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other780) { + lastEvent = other780.lastEvent; + maxEvents = other780.maxEvents; + __isset = other780.__isset; return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -18736,25 +19283,25 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other761) { - eventId = other761.eventId; - eventTime = other761.eventTime; - eventType = other761.eventType; - dbName = other761.dbName; - tableName = other761.tableName; - message = other761.message; - messageFormat = other761.messageFormat; - __isset = other761.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other762) { - eventId = other762.eventId; - eventTime = other762.eventTime; - eventType = other762.eventType; - dbName = other762.dbName; - tableName = other762.tableName; - message = other762.message; - messageFormat = other762.messageFormat; - __isset = other762.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other781) { + eventId = other781.eventId; + eventTime = other781.eventTime; + eventType = other781.eventType; + dbName = other781.dbName; + tableName = other781.tableName; + message = other781.message; + messageFormat = other781.messageFormat; + __isset = other781.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other782) { + eventId = other782.eventId; + eventTime = other782.eventTime; + eventType = other782.eventType; + dbName = other782.dbName; + tableName = other782.tableName; + message = other782.message; + messageFormat = other782.messageFormat; + __isset = other782.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -18805,14 +19352,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size763; - ::apache::thrift::protocol::TType _etype766; - xfer += iprot->readListBegin(_etype766, _size763); - this->events.resize(_size763); - uint32_t _i767; - for (_i767 = 0; _i767 < _size763; ++_i767) + uint32_t _size783; + ::apache::thrift::protocol::TType _etype786; + xfer += iprot->readListBegin(_etype786, _size783); + this->events.resize(_size783); + uint32_t _i787; + for (_i787 = 0; _i787 < _size783; ++_i787) { - xfer += this->events[_i767].read(iprot); + xfer += this->events[_i787].read(iprot); } xfer += iprot->readListEnd(); } @@ -18843,10 +19390,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter768; - for 
(_iter768 = this->events.begin(); _iter768 != this->events.end(); ++_iter768) + std::vector ::const_iterator _iter788; + for (_iter788 = this->events.begin(); _iter788 != this->events.end(); ++_iter788) { - xfer += (*_iter768).write(oprot); + xfer += (*_iter788).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18862,11 +19409,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other769) { - events = other769.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other789) { + events = other789.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other770) { - events = other770.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other790) { + events = other790.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -18948,11 +19495,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other771) { - eventId = other771.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other791) { + eventId = other791.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other772) { - eventId = other772.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other792) { + eventId = other792.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -19054,13 +19601,13 @@ void swap(NotificationEventsCountRequest &a, NotificationEventsCountRequest &b) swap(a.dbName, b.dbName); } -NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other773) { - fromEventId = other773.fromEventId; - dbName = other773.dbName; +NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other793) { + fromEventId = other793.fromEventId; + dbName = other793.dbName; } -NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other774) { - fromEventId = other774.fromEventId; - dbName = other774.dbName; +NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other794) { + fromEventId = other794.fromEventId; + dbName = other794.dbName; return *this; } void NotificationEventsCountRequest::printTo(std::ostream& out) const { @@ -19143,11 +19690,11 @@ void swap(NotificationEventsCountResponse &a, NotificationEventsCountResponse &b swap(a.eventsCount, b.eventsCount); } -NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other775) { - eventsCount = other775.eventsCount; +NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other795) { + eventsCount = other795.eventsCount; } -NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other776) { - eventsCount = other776.eventsCount; +NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other796) { + eventsCount = 
other796.eventsCount; return *this; } void NotificationEventsCountResponse::printTo(std::ostream& out) const { @@ -19210,14 +19757,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size777; - ::apache::thrift::protocol::TType _etype780; - xfer += iprot->readListBegin(_etype780, _size777); - this->filesAdded.resize(_size777); - uint32_t _i781; - for (_i781 = 0; _i781 < _size777; ++_i781) + uint32_t _size797; + ::apache::thrift::protocol::TType _etype800; + xfer += iprot->readListBegin(_etype800, _size797); + this->filesAdded.resize(_size797); + uint32_t _i801; + for (_i801 = 0; _i801 < _size797; ++_i801) { - xfer += iprot->readString(this->filesAdded[_i781]); + xfer += iprot->readString(this->filesAdded[_i801]); } xfer += iprot->readListEnd(); } @@ -19230,14 +19777,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAddedChecksum.clear(); - uint32_t _size782; - ::apache::thrift::protocol::TType _etype785; - xfer += iprot->readListBegin(_etype785, _size782); - this->filesAddedChecksum.resize(_size782); - uint32_t _i786; - for (_i786 = 0; _i786 < _size782; ++_i786) + uint32_t _size802; + ::apache::thrift::protocol::TType _etype805; + xfer += iprot->readListBegin(_etype805, _size802); + this->filesAddedChecksum.resize(_size802); + uint32_t _i806; + for (_i806 = 0; _i806 < _size802; ++_i806) { - xfer += iprot->readString(this->filesAddedChecksum[_i786]); + xfer += iprot->readString(this->filesAddedChecksum[_i806]); } xfer += iprot->readListEnd(); } @@ -19273,10 +19820,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter787; - for (_iter787 = this->filesAdded.begin(); _iter787 != this->filesAdded.end(); ++_iter787) + std::vector ::const_iterator _iter807; + for (_iter807 = this->filesAdded.begin(); _iter807 != this->filesAdded.end(); ++_iter807) { - xfer += oprot->writeString((*_iter787)); + xfer += oprot->writeString((*_iter807)); } xfer += oprot->writeListEnd(); } @@ -19286,10 +19833,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAddedChecksum.size())); - std::vector ::const_iterator _iter788; - for (_iter788 = this->filesAddedChecksum.begin(); _iter788 != this->filesAddedChecksum.end(); ++_iter788) + std::vector ::const_iterator _iter808; + for (_iter808 = this->filesAddedChecksum.begin(); _iter808 != this->filesAddedChecksum.end(); ++_iter808) { - xfer += oprot->writeString((*_iter788)); + xfer += oprot->writeString((*_iter808)); } xfer += oprot->writeListEnd(); } @@ -19308,17 +19855,17 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.__isset, b.__isset); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other789) { - replace = other789.replace; - filesAdded = other789.filesAdded; - filesAddedChecksum = other789.filesAddedChecksum; - __isset = other789.__isset; +InsertEventRequestData::InsertEventRequestData(const 
InsertEventRequestData& other809) { + replace = other809.replace; + filesAdded = other809.filesAdded; + filesAddedChecksum = other809.filesAddedChecksum; + __isset = other809.__isset; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other790) { - replace = other790.replace; - filesAdded = other790.filesAdded; - filesAddedChecksum = other790.filesAddedChecksum; - __isset = other790.__isset; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other810) { + replace = other810.replace; + filesAdded = other810.filesAdded; + filesAddedChecksum = other810.filesAddedChecksum; + __isset = other810.__isset; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -19400,13 +19947,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other791) { - insertData = other791.insertData; - __isset = other791.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other811) { + insertData = other811.insertData; + __isset = other811.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other792) { - insertData = other792.insertData; - __isset = other792.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other812) { + insertData = other812.insertData; + __isset = other812.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -19503,14 +20050,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size793; - ::apache::thrift::protocol::TType _etype796; - xfer += iprot->readListBegin(_etype796, _size793); - this->partitionVals.resize(_size793); - uint32_t _i797; - for (_i797 = 0; _i797 < _size793; ++_i797) + uint32_t _size813; + ::apache::thrift::protocol::TType _etype816; + xfer += iprot->readListBegin(_etype816, _size813); + this->partitionVals.resize(_size813); + uint32_t _i817; + for (_i817 = 0; _i817 < _size813; ++_i817) { - xfer += iprot->readString(this->partitionVals[_i797]); + xfer += iprot->readString(this->partitionVals[_i817]); } xfer += iprot->readListEnd(); } @@ -19562,10 +20109,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter798; - for (_iter798 = this->partitionVals.begin(); _iter798 != this->partitionVals.end(); ++_iter798) + std::vector ::const_iterator _iter818; + for (_iter818 = this->partitionVals.begin(); _iter818 != this->partitionVals.end(); ++_iter818) { - xfer += oprot->writeString((*_iter798)); + xfer += oprot->writeString((*_iter818)); } xfer += oprot->writeListEnd(); } @@ -19586,21 +20133,21 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other799) { - successful = other799.successful; - data = other799.data; - dbName = other799.dbName; - tableName = other799.tableName; - partitionVals = other799.partitionVals; - __isset = other799.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other800) { - successful = 
other800.successful; - data = other800.data; - dbName = other800.dbName; - tableName = other800.tableName; - partitionVals = other800.partitionVals; - __isset = other800.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other819) { + successful = other819.successful; + data = other819.data; + dbName = other819.dbName; + tableName = other819.tableName; + partitionVals = other819.partitionVals; + __isset = other819.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other820) { + successful = other820.successful; + data = other820.data; + dbName = other820.dbName; + tableName = other820.tableName; + partitionVals = other820.partitionVals; + __isset = other820.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -19663,11 +20210,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other801) { - (void) other801; +FireEventResponse::FireEventResponse(const FireEventResponse& other821) { + (void) other821; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other802) { - (void) other802; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other822) { + (void) other822; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -19767,15 +20314,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other803) { - metadata = other803.metadata; - includeBitset = other803.includeBitset; - __isset = other803.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other823) { + metadata = other823.metadata; + includeBitset = other823.includeBitset; + __isset = other823.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other804) { - metadata = other804.metadata; - includeBitset = other804.includeBitset; - __isset = other804.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other824) { + metadata = other824.metadata; + includeBitset = other824.includeBitset; + __isset = other824.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -19826,17 +20373,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size805; - ::apache::thrift::protocol::TType _ktype806; - ::apache::thrift::protocol::TType _vtype807; - xfer += iprot->readMapBegin(_ktype806, _vtype807, _size805); - uint32_t _i809; - for (_i809 = 0; _i809 < _size805; ++_i809) + uint32_t _size825; + ::apache::thrift::protocol::TType _ktype826; + ::apache::thrift::protocol::TType _vtype827; + xfer += iprot->readMapBegin(_ktype826, _vtype827, _size825); + uint32_t _i829; + for (_i829 = 0; _i829 < _size825; ++_i829) { - int64_t _key810; - xfer += iprot->readI64(_key810); - MetadataPpdResult& _val811 = this->metadata[_key810]; - xfer += _val811.read(iprot); + int64_t _key830; + xfer += iprot->readI64(_key830); + MetadataPpdResult& _val831 = this->metadata[_key830]; + xfer += _val831.read(iprot); } xfer += iprot->readMapEnd(); } @@ -19877,11 +20424,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, 
::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter812; - for (_iter812 = this->metadata.begin(); _iter812 != this->metadata.end(); ++_iter812) + std::map ::const_iterator _iter832; + for (_iter832 = this->metadata.begin(); _iter832 != this->metadata.end(); ++_iter832) { - xfer += oprot->writeI64(_iter812->first); - xfer += _iter812->second.write(oprot); + xfer += oprot->writeI64(_iter832->first); + xfer += _iter832->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -19902,13 +20449,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other813) { - metadata = other813.metadata; - isSupported = other813.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other833) { + metadata = other833.metadata; + isSupported = other833.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other814) { - metadata = other814.metadata; - isSupported = other814.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other834) { + metadata = other834.metadata; + isSupported = other834.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -19969,14 +20516,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size815; - ::apache::thrift::protocol::TType _etype818; - xfer += iprot->readListBegin(_etype818, _size815); - this->fileIds.resize(_size815); - uint32_t _i819; - for (_i819 = 0; _i819 < _size815; ++_i819) + uint32_t _size835; + ::apache::thrift::protocol::TType _etype838; + xfer += iprot->readListBegin(_etype838, _size835); + this->fileIds.resize(_size835); + uint32_t _i839; + for (_i839 = 0; _i839 < _size835; ++_i839) { - xfer += iprot->readI64(this->fileIds[_i819]); + xfer += iprot->readI64(this->fileIds[_i839]); } xfer += iprot->readListEnd(); } @@ -20003,9 +20550,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast820; - xfer += iprot->readI32(ecast820); - this->type = (FileMetadataExprType::type)ecast820; + int32_t ecast840; + xfer += iprot->readI32(ecast840); + this->type = (FileMetadataExprType::type)ecast840; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -20035,10 +20582,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter821; - for (_iter821 = this->fileIds.begin(); _iter821 != this->fileIds.end(); ++_iter821) + std::vector ::const_iterator _iter841; + for (_iter841 = this->fileIds.begin(); _iter841 != this->fileIds.end(); ++_iter841) { - xfer += oprot->writeI64((*_iter821)); + xfer += oprot->writeI64((*_iter841)); } xfer += oprot->writeListEnd(); } @@ -20072,19 +20619,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const 
-GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other822) {
-  fileIds = other822.fileIds;
-  expr = other822.expr;
-  doGetFooters = other822.doGetFooters;
-  type = other822.type;
-  __isset = other822.__isset;
+GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other842) {
+  fileIds = other842.fileIds;
+  expr = other842.expr;
+  doGetFooters = other842.doGetFooters;
+  type = other842.type;
+  __isset = other842.__isset;
 }
-GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other823) {
-  fileIds = other823.fileIds;
-  expr = other823.expr;
-  doGetFooters = other823.doGetFooters;
-  type = other823.type;
-  __isset = other823.__isset;
+GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other843) {
+  fileIds = other843.fileIds;
+  expr = other843.expr;
+  doGetFooters = other843.doGetFooters;
+  type = other843.type;
+  __isset = other843.__isset;
   return *this;
 }
 void GetFileMetadataByExprRequest::printTo(std::ostream& out) const {
@@ -20137,17 +20684,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->metadata.clear();
-            uint32_t _size824;
-            ::apache::thrift::protocol::TType _ktype825;
-            ::apache::thrift::protocol::TType _vtype826;
-            xfer += iprot->readMapBegin(_ktype825, _vtype826, _size824);
-            uint32_t _i828;
-            for (_i828 = 0; _i828 < _size824; ++_i828)
+            uint32_t _size844;
+            ::apache::thrift::protocol::TType _ktype845;
+            ::apache::thrift::protocol::TType _vtype846;
+            xfer += iprot->readMapBegin(_ktype845, _vtype846, _size844);
+            uint32_t _i848;
+            for (_i848 = 0; _i848 < _size844; ++_i848)
             {
-              int64_t _key829;
-              xfer += iprot->readI64(_key829);
-              std::string& _val830 = this->metadata[_key829];
-              xfer += iprot->readBinary(_val830);
+              int64_t _key849;
+              xfer += iprot->readI64(_key849);
+              std::string& _val850 = this->metadata[_key849];
+              xfer += iprot->readBinary(_val850);
             }
             xfer += iprot->readMapEnd();
           }
@@ -20188,11 +20735,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr
   xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->metadata.size()));
-    std::map<int64_t, std::string> ::const_iterator _iter831;
-    for (_iter831 = this->metadata.begin(); _iter831 != this->metadata.end(); ++_iter831)
+    std::map<int64_t, std::string> ::const_iterator _iter851;
+    for (_iter851 = this->metadata.begin(); _iter851 != this->metadata.end(); ++_iter851)
     {
-      xfer += oprot->writeI64(_iter831->first);
-      xfer += oprot->writeBinary(_iter831->second);
+      xfer += oprot->writeI64(_iter851->first);
+      xfer += oprot->writeBinary(_iter851->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -20213,13 +20760,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) {
   swap(a.isSupported, b.isSupported);
 }
-GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other832) {
-  metadata = other832.metadata;
-  isSupported = other832.isSupported;
+GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other852) {
+  metadata = other852.metadata;
+  isSupported = other852.isSupported;
 }
-GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other833) {
-  metadata = other833.metadata;
-  isSupported = other833.isSupported;
+GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other853) {
+  metadata = other853.metadata;
+  isSupported = other853.isSupported;
   return *this;
 }
 void GetFileMetadataResult::printTo(std::ostream& out) const {
@@ -20265,14 +20812,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size834;
-            ::apache::thrift::protocol::TType _etype837;
-            xfer += iprot->readListBegin(_etype837, _size834);
-            this->fileIds.resize(_size834);
-            uint32_t _i838;
-            for (_i838 = 0; _i838 < _size834; ++_i838)
+            uint32_t _size854;
+            ::apache::thrift::protocol::TType _etype857;
+            xfer += iprot->readListBegin(_etype857, _size854);
+            this->fileIds.resize(_size854);
+            uint32_t _i858;
+            for (_i858 = 0; _i858 < _size854; ++_i858)
             {
-              xfer += iprot->readI64(this->fileIds[_i838]);
+              xfer += iprot->readI64(this->fileIds[_i858]);
             }
             xfer += iprot->readListEnd();
           }
@@ -20303,10 +20850,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter839;
-    for (_iter839 = this->fileIds.begin(); _iter839 != this->fileIds.end(); ++_iter839)
+    std::vector<int64_t> ::const_iterator _iter859;
+    for (_iter859 = this->fileIds.begin(); _iter859 != this->fileIds.end(); ++_iter859)
     {
-      xfer += oprot->writeI64((*_iter839));
+      xfer += oprot->writeI64((*_iter859));
     }
     xfer += oprot->writeListEnd();
   }
@@ -20322,11 +20869,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) {
   swap(a.fileIds, b.fileIds);
 }
-GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other840) {
-  fileIds = other840.fileIds;
+GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other860) {
+  fileIds = other860.fileIds;
 }
-GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other841) {
-  fileIds = other841.fileIds;
+GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other861) {
+  fileIds = other861.fileIds;
   return *this;
 }
 void GetFileMetadataRequest::printTo(std::ostream& out) const {
@@ -20385,11 +20932,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) {
   (void) b;
 }
-PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other842) {
-  (void) other842;
+PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other862) {
+  (void) other862;
 }
-PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other843) {
-  (void) other843;
+PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other863) {
+  (void) other863;
   return *this;
 }
 void PutFileMetadataResult::printTo(std::ostream& out) const {
@@ -20443,14 +20990,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size844;
-            ::apache::thrift::protocol::TType _etype847;
-            xfer += iprot->readListBegin(_etype847, _size844);
-            this->fileIds.resize(_size844);
-            uint32_t _i848;
-            for (_i848 = 0; _i848 < _size844; ++_i848)
+            uint32_t _size864;
+            ::apache::thrift::protocol::TType _etype867;
+            xfer += iprot->readListBegin(_etype867, _size864);
+            this->fileIds.resize(_size864);
+            uint32_t _i868;
+            for (_i868 = 0; _i868 < _size864; ++_i868)
             {
-              xfer += iprot->readI64(this->fileIds[_i848]);
+              xfer += iprot->readI64(this->fileIds[_i868]);
             }
             xfer += iprot->readListEnd();
           }
@@ -20463,14 +21010,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->metadata.clear();
-            uint32_t _size849;
-            ::apache::thrift::protocol::TType _etype852;
-            xfer += iprot->readListBegin(_etype852, _size849);
-            this->metadata.resize(_size849);
-            uint32_t _i853;
-            for (_i853 = 0; _i853 < _size849; ++_i853)
+            uint32_t _size869;
+            ::apache::thrift::protocol::TType _etype872;
+            xfer += iprot->readListBegin(_etype872, _size869);
+            this->metadata.resize(_size869);
+            uint32_t _i873;
+            for (_i873 = 0; _i873 < _size869; ++_i873)
             {
-              xfer += iprot->readBinary(this->metadata[_i853]);
+              xfer += iprot->readBinary(this->metadata[_i873]);
             }
             xfer += iprot->readListEnd();
           }
@@ -20481,9 +21028,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr
         break;
       case 3:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast854;
-          xfer += iprot->readI32(ecast854);
-          this->type = (FileMetadataExprType::type)ecast854;
+          int32_t ecast874;
+          xfer += iprot->readI32(ecast874);
+          this->type = (FileMetadataExprType::type)ecast874;
           this->__isset.type = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -20513,10 +21060,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter855;
-    for (_iter855 = this->fileIds.begin(); _iter855 != this->fileIds.end(); ++_iter855)
+    std::vector<int64_t> ::const_iterator _iter875;
+    for (_iter875 = this->fileIds.begin(); _iter875 != this->fileIds.end(); ++_iter875)
     {
-      xfer += oprot->writeI64((*_iter855));
+      xfer += oprot->writeI64((*_iter875));
     }
     xfer += oprot->writeListEnd();
   }
@@ -20525,10 +21072,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op
   xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->metadata.size()));
-    std::vector<std::string> ::const_iterator _iter856;
-    for (_iter856 = this->metadata.begin(); _iter856 != this->metadata.end(); ++_iter856)
+    std::vector<std::string> ::const_iterator _iter876;
+    for (_iter876 = this->metadata.begin(); _iter876 != this->metadata.end(); ++_iter876)
     {
-      xfer += oprot->writeBinary((*_iter856));
+      xfer += oprot->writeBinary((*_iter876));
     }
     xfer += oprot->writeListEnd();
   }
@@ -20552,17 +21099,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) {
   swap(a.__isset, b.__isset);
 }
-PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other857) {
-  fileIds = other857.fileIds;
-  metadata = other857.metadata;
-  type = other857.type;
-  __isset = other857.__isset;
+PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other877) {
+  fileIds = other877.fileIds;
+  metadata = other877.metadata;
+  type = other877.type;
+  __isset = other877.__isset;
 }
-PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other858) {
-  fileIds = other858.fileIds;
-  metadata = other858.metadata;
-  type = other858.type;
-  __isset = other858.__isset;
+PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other878) {
+  fileIds = other878.fileIds;
+  metadata = other878.metadata;
+  type = other878.type;
+  __isset = other878.__isset;
   return *this;
 }
 void PutFileMetadataRequest::printTo(std::ostream& out) const {
@@ -20623,11 +21170,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) {
   (void) b;
 }
-ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other859) {
-  (void) other859;
+ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other879) {
+  (void) other879;
 }
-ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other860) {
-  (void) other860;
+ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other880) {
+  (void) other880;
   return *this;
 }
 void ClearFileMetadataResult::printTo(std::ostream& out) const {
@@ -20671,14 +21218,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fileIds.clear();
-            uint32_t _size861;
-            ::apache::thrift::protocol::TType _etype864;
-            xfer += iprot->readListBegin(_etype864, _size861);
-            this->fileIds.resize(_size861);
-            uint32_t _i865;
-            for (_i865 = 0; _i865 < _size861; ++_i865)
+            uint32_t _size881;
+            ::apache::thrift::protocol::TType _etype884;
+            xfer += iprot->readListBegin(_etype884, _size881);
+            this->fileIds.resize(_size881);
+            uint32_t _i885;
+            for (_i885 = 0; _i885 < _size881; ++_i885)
             {
-              xfer += iprot->readI64(this->fileIds[_i865]);
+              xfer += iprot->readI64(this->fileIds[_i885]);
             }
             xfer += iprot->readListEnd();
           }
@@ -20709,10 +21256,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->fileIds.size()));
-    std::vector<int64_t> ::const_iterator _iter866;
-    for (_iter866 = this->fileIds.begin(); _iter866 != this->fileIds.end(); ++_iter866)
+    std::vector<int64_t> ::const_iterator _iter886;
+    for (_iter886 = this->fileIds.begin(); _iter886 != this->fileIds.end(); ++_iter886)
     {
-      xfer += oprot->writeI64((*_iter866));
+      xfer += oprot->writeI64((*_iter886));
     }
     xfer += oprot->writeListEnd();
   }
@@ -20728,11 +21275,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) {
   swap(a.fileIds, b.fileIds);
 }
-ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other867) {
-  fileIds = other867.fileIds;
+ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other887) {
+  fileIds = other887.fileIds;
 }
-ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other868) {
-  fileIds = other868.fileIds;
+ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other888) {
+  fileIds = other888.fileIds;
   return *this;
 }
 void ClearFileMetadataRequest::printTo(std::ostream& out) const {
@@ -20814,11 +21361,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) {
   swap(a.isSupported, b.isSupported);
 }
-CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other869) {
-  isSupported = other869.isSupported;
+CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other889) {
+  isSupported = other889.isSupported;
 }
-CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other870) {
-  isSupported = other870.isSupported;
+CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other890) {
+  isSupported = other890.isSupported;
   return *this;
 }
 void CacheFileMetadataResult::printTo(std::ostream& out) const {
@@ -20959,19 +21506,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) {
   swap(a.__isset, b.__isset);
 }
-CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other871) {
-  dbName = other871.dbName;
-  tblName = other871.tblName;
-  partName = other871.partName;
-  isAllParts = other871.isAllParts;
-  __isset = other871.__isset;
+CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other891) {
+  dbName = other891.dbName;
+  tblName = other891.tblName;
+  partName = other891.partName;
+  isAllParts = other891.isAllParts;
+  __isset = other891.__isset;
 }
-CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other872) {
-  dbName = other872.dbName;
-  tblName = other872.tblName;
-  partName = other872.partName;
-  isAllParts = other872.isAllParts;
-  __isset = other872.__isset;
+CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other892) {
+  dbName = other892.dbName;
+  tblName = other892.tblName;
+  partName = other892.partName;
+  isAllParts = other892.isAllParts;
+  __isset = other892.__isset;
   return *this;
 }
 void CacheFileMetadataRequest::printTo(std::ostream& out) const {
@@ -21019,14 +21566,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->functions.clear();
-            uint32_t _size873;
-            ::apache::thrift::protocol::TType _etype876;
-            xfer += iprot->readListBegin(_etype876, _size873);
-            this->functions.resize(_size873);
-            uint32_t _i877;
-            for (_i877 = 0; _i877 < _size873; ++_i877)
+            uint32_t _size893;
+            ::apache::thrift::protocol::TType _etype896;
+            xfer += iprot->readListBegin(_etype896, _size893);
+            this->functions.resize(_size893);
+            uint32_t _i897;
+            for (_i897 = 0; _i897 < _size893; ++_i897)
             {
-              xfer += this->functions[_i877].read(iprot);
+              xfer += this->functions[_i897].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -21056,10 +21603,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o
   xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
-    std::vector<Function> ::const_iterator _iter878;
-    for (_iter878 = this->functions.begin(); _iter878 != this->functions.end(); ++_iter878)
+    std::vector<Function> ::const_iterator _iter898;
+    for (_iter898 = this->functions.begin(); _iter898 != this->functions.end(); ++_iter898)
     {
-      xfer += (*_iter878).write(oprot);
+      xfer += (*_iter898).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -21076,13 +21623,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
   swap(a.__isset, b.__isset);
 }
-GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other879) {
-  functions = other879.functions;
-  __isset = other879.__isset;
+GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other899) {
+  functions = other899.functions;
+  __isset = other899.__isset;
 }
-GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other880) {
-  functions = other880.functions;
-  __isset = other880.__isset;
+GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other900) {
+  functions = other900.functions;
+  __isset = other900.__isset;
   return *this;
 }
 void GetAllFunctionsResponse::printTo(std::ostream& out) const {
@@ -21127,16 +21674,16 @@ uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
            this->values.clear();
-            uint32_t _size881;
-            ::apache::thrift::protocol::TType _etype884;
-            xfer += iprot->readListBegin(_etype884, _size881);
-            this->values.resize(_size881);
-            uint32_t _i885;
-            for (_i885 = 0; _i885 < _size881; ++_i885)
+            uint32_t _size901;
+            ::apache::thrift::protocol::TType _etype904;
+            xfer += iprot->readListBegin(_etype904, _size901);
+            this->values.resize(_size901);
+            uint32_t _i905;
+            for (_i905 = 0; _i905 < _size901; ++_i905)
             {
-              int32_t ecast886;
-              xfer += iprot->readI32(ecast886);
-              this->values[_i885] = (ClientCapability::type)ecast886;
+              int32_t ecast906;
+              xfer += iprot->readI32(ecast906);
+              this->values[_i905] = (ClientCapability::type)ecast906;
             }
             xfer += iprot->readListEnd();
           }
@@ -21167,10 +21714,10 @@ uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->values.size()));
-    std::vector<ClientCapability::type> ::const_iterator _iter887;
-    for (_iter887 = this->values.begin(); _iter887 != this->values.end(); ++_iter887)
+    std::vector<ClientCapability::type> ::const_iterator _iter907;
+    for (_iter907 = this->values.begin(); _iter907 != this->values.end(); ++_iter907)
     {
-      xfer += oprot->writeI32((int32_t)(*_iter887));
+      xfer += oprot->writeI32((int32_t)(*_iter907));
    }
     xfer += oprot->writeListEnd();
   }
@@ -21186,11 +21733,11 @@ void swap(ClientCapabilities &a, ClientCapabilities &b) {
   swap(a.values, b.values);
 }
-ClientCapabilities::ClientCapabilities(const ClientCapabilities& other888) {
-  values = other888.values;
+ClientCapabilities::ClientCapabilities(const ClientCapabilities& other908) {
+  values = other908.values;
 }
-ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other889) {
-  values = other889.values;
+ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other909) {
+  values = other909.values;
   return *this;
 }
 void ClientCapabilities::printTo(std::ostream& out) const {
@@ -21312,17 +21859,17 @@ void swap(GetTableRequest &a, GetTableRequest &b) {
   swap(a.__isset, b.__isset);
 }
-GetTableRequest::GetTableRequest(const GetTableRequest& other890) {
-  dbName = other890.dbName;
-  tblName = other890.tblName;
-  capabilities = other890.capabilities;
-  __isset = other890.__isset;
+GetTableRequest::GetTableRequest(const GetTableRequest& other910) {
+  dbName = other910.dbName;
+  tblName = other910.tblName;
+  capabilities = other910.capabilities;
+  __isset = other910.__isset;
 }
-GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other891) {
-  dbName = other891.dbName;
-  tblName = other891.tblName;
-  capabilities = other891.capabilities;
-  __isset = other891.__isset;
+GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other911) {
+  dbName = other911.dbName;
+  tblName = other911.tblName;
+  capabilities = other911.capabilities;
+  __isset = other911.__isset;
   return *this;
 }
 void GetTableRequest::printTo(std::ostream& out) const {
@@ -21406,11 +21953,11 @@ void swap(GetTableResult &a, GetTableResult &b) {
   swap(a.table, b.table);
 }
-GetTableResult::GetTableResult(const GetTableResult& other892) {
-  table = other892.table;
+GetTableResult::GetTableResult(const GetTableResult& other912) {
+  table = other912.table;
 }
-GetTableResult& GetTableResult::operator=(const GetTableResult& other893) {
-  table = other893.table;
+GetTableResult& GetTableResult::operator=(const GetTableResult& other913) {
+  table = other913.table;
   return *this;
 }
 void GetTableResult::printTo(std::ostream& out) const {
@@ -21473,14 +22020,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->tblNames.clear();
-            uint32_t _size894;
-            ::apache::thrift::protocol::TType _etype897;
-            xfer += iprot->readListBegin(_etype897, _size894);
-            this->tblNames.resize(_size894);
-            uint32_t _i898;
-            for (_i898 = 0; _i898 < _size894; ++_i898)
+            uint32_t _size914;
+            ::apache::thrift::protocol::TType _etype917;
+            xfer += iprot->readListBegin(_etype917, _size914);
+            this->tblNames.resize(_size914);
+            uint32_t _i918;
+            for (_i918 = 0; _i918 < _size914; ++_i918)
            {
-              xfer += iprot->readString(this->tblNames[_i898]);
+              xfer += iprot->readString(this->tblNames[_i918]);
            }
            xfer += iprot->readListEnd();
          }
@@ -21524,10 +22071,10 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c
   xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tblNames.size()));
-    std::vector<std::string> ::const_iterator _iter899;
-    for (_iter899 = this->tblNames.begin(); _iter899 != this->tblNames.end(); ++_iter899)
+    std::vector<std::string> ::const_iterator _iter919;
+    for (_iter919 = this->tblNames.begin(); _iter919 != this->tblNames.end(); ++_iter919)
     {
-      xfer += oprot->writeString((*_iter899));
+      xfer += oprot->writeString((*_iter919));
    }
     xfer += oprot->writeListEnd();
   }
@@ -21551,17 +22098,17 @@ void swap(GetTablesRequest &a, GetTablesRequest &b) {
   swap(a.__isset, b.__isset);
 }
-GetTablesRequest::GetTablesRequest(const GetTablesRequest& other900) {
-  dbName = other900.dbName;
-  tblNames = other900.tblNames;
-  capabilities = other900.capabilities;
-  __isset = other900.__isset;
+GetTablesRequest::GetTablesRequest(const GetTablesRequest& other920) {
+  dbName = other920.dbName;
+  tblNames = other920.tblNames;
+  capabilities = other920.capabilities;
+  __isset = other920.__isset;
 }
-GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other901) {
-  dbName = other901.dbName;
-  tblNames = other901.tblNames;
-  capabilities = other901.capabilities;
-  __isset = other901.__isset;
+GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other921) {
+  dbName = other921.dbName;
+  tblNames = other921.tblNames;
+  capabilities = other921.capabilities;
+  __isset = other921.__isset;
   return *this;
 }
 void GetTablesRequest::printTo(std::ostream& out) const {
@@ -21608,14 +22155,14 @@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->tables.clear();
-            uint32_t _size902;
-            ::apache::thrift::protocol::TType _etype905;
-            xfer += iprot->readListBegin(_etype905, _size902);
-            this->tables.resize(_size902);
-            uint32_t _i906;
-            for (_i906 = 0; _i906 < _size902; ++_i906)
+            uint32_t _size922;
+            ::apache::thrift::protocol::TType _etype925;
+            xfer += iprot->readListBegin(_etype925, _size922);
+            this->tables.resize(_size922);
+            uint32_t _i926;
+            for (_i926 = 0; _i926 < _size922; ++_i926)
            {
-              xfer += this->tables[_i906].read(iprot);
+              xfer += this->tables[_i926].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -21646,10 +22193,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co
   xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tables.size()));
-    std::vector<Table> ::const_iterator _iter907;
-    for (_iter907 = this->tables.begin(); _iter907 != this->tables.end(); ++_iter907)
+    std::vector<Table> ::const_iterator _iter927;
+    for (_iter927 = this->tables.begin(); _iter927 != this->tables.end(); ++_iter927)
    {
-      xfer += (*_iter907).write(oprot);
+      xfer += (*_iter927).write(oprot);
    }
     xfer += oprot->writeListEnd();
   }
@@ -21665,11 +22212,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) {
   swap(a.tables, b.tables);
 }
-GetTablesResult::GetTablesResult(const GetTablesResult& other908) {
-  tables = other908.tables;
+GetTablesResult::GetTablesResult(const GetTablesResult& other928) {
+  tables = other928.tables;
 }
-GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other909) {
-  tables = other909.tables;
+GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other929) {
+  tables = other929.tables;
   return *this;
 }
 void GetTablesResult::printTo(std::ostream& out) const {
@@ -21771,13 +22318,13 @@ void swap(CmRecycleRequest &a, CmRecycleRequest &b) {
   swap(a.purge, b.purge);
 }
-CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other910) {
-  dataPath = other910.dataPath;
-  purge = other910.purge;
+CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other930) {
+  dataPath = other930.dataPath;
+  purge = other930.purge;
 }
-CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other911) {
-  dataPath = other911.dataPath;
-  purge = other911.purge;
+CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other931) {
+  dataPath = other931.dataPath;
+  purge = other931.purge;
   return *this;
 }
 void CmRecycleRequest::printTo(std::ostream& out) const {
@@ -21837,11 +22384,11 @@ void swap(CmRecycleResponse &a, CmRecycleResponse &b) {
   (void) b;
 }
-CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other912) {
-  (void) other912;
+CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other932) {
+  (void) other932;
 }
-CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other913) {
-  (void) other913;
+CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other933) {
+  (void) other933;
   return *this;
 }
 void CmRecycleResponse::printTo(std::ostream& out) const {
@@ -21982,19 +22529,19 @@ void swap(TableMeta &a, TableMeta &b) {
   swap(a.__isset, b.__isset);
 }
-TableMeta::TableMeta(const TableMeta& other914) {
-  dbName = other914.dbName;
-  tableName = other914.tableName;
-  tableType = other914.tableType;
-  comments = other914.comments;
-  __isset = other914.__isset;
+TableMeta::TableMeta(const TableMeta& other934) {
+  dbName = other934.dbName;
+  tableName = other934.tableName;
+  tableType = other934.tableType;
+  comments = other934.comments;
+  __isset = other934.__isset;
 }
-TableMeta& TableMeta::operator=(const TableMeta& other915) {
-  dbName = other915.dbName;
-  tableName = other915.tableName;
-  tableType = other915.tableType;
-  comments = other915.comments;
-  __isset = other915.__isset;
+TableMeta& TableMeta::operator=(const TableMeta& other935) {
+  dbName = other935.dbName;
+  tableName = other935.tableName;
+  tableType = other935.tableType;
+  comments = other935.comments;
+  __isset = other935.__isset;
   return *this;
 }
 void TableMeta::printTo(std::ostream& out) const {
@@ -22052,15 +22599,15 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
        if (ftype == ::apache::thrift::protocol::T_SET) {
          {
            this->tablesUsed.clear();
-            uint32_t _size916;
-            ::apache::thrift::protocol::TType _etype919;
-            xfer += iprot->readSetBegin(_etype919, _size916);
-            uint32_t _i920;
-            for (_i920 = 0; _i920 < _size916; ++_i920)
+            uint32_t _size936;
+            ::apache::thrift::protocol::TType _etype939;
+            xfer += iprot->readSetBegin(_etype939, _size936);
+            uint32_t _i940;
+            for (_i940 = 0; _i940 < _size936; ++_i940)
            {
-              std::string _elem921;
-              xfer += iprot->readString(_elem921);
-              this->tablesUsed.insert(_elem921);
+              std::string _elem941;
+              xfer += iprot->readString(_elem941);
+              this->tablesUsed.insert(_elem941);
            }
            xfer += iprot->readSetEnd();
          }
@@ -22109,10 +22656,10 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
   xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1);
   {
     xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tablesUsed.size()));
-    std::set<std::string> ::const_iterator _iter922;
-    for (_iter922 = this->tablesUsed.begin(); _iter922 != this->tablesUsed.end(); ++_iter922)
+    std::set<std::string> ::const_iterator _iter942;
+    for (_iter942 = this->tablesUsed.begin(); _iter942 != this->tablesUsed.end(); ++_iter942)
    {
-      xfer += oprot->writeString((*_iter922));
+      xfer += oprot->writeString((*_iter942));
    }
     xfer += oprot->writeSetEnd();
   }
@@ -22140,17 +22687,17 @@ void swap(Materialization &a, Materialization &b) {
   swap(a.__isset, b.__isset);
 }
-Materialization::Materialization(const Materialization& other923) {
-  tablesUsed = other923.tablesUsed;
-  validTxnList = other923.validTxnList;
-  invalidationTime = other923.invalidationTime;
-  __isset = other923.__isset;
+Materialization::Materialization(const Materialization& other943) {
+  tablesUsed = other943.tablesUsed;
+  validTxnList = other943.validTxnList;
+  invalidationTime = other943.invalidationTime;
+  __isset = other943.__isset;
 }
-Materialization& Materialization::operator=(const Materialization& other924) {
-  tablesUsed = other924.tablesUsed;
-  validTxnList = other924.validTxnList;
-  invalidationTime = other924.invalidationTime;
-  __isset = other924.__isset;
+Materialization& Materialization::operator=(const Materialization& other944) {
+  tablesUsed = other944.tablesUsed;
+  validTxnList = other944.validTxnList;
+  invalidationTime = other944.invalidationTime;
+  __isset = other944.__isset;
   return *this;
 }
 void Materialization::printTo(std::ostream& out) const {
@@ -22218,9 +22765,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) {
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast925;
-          xfer += iprot->readI32(ecast925);
-          this->status = (WMResourcePlanStatus::type)ecast925;
+          int32_t ecast945;
+          xfer += iprot->readI32(ecast945);
+          this->status = (WMResourcePlanStatus::type)ecast945;
          this->__isset.status = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -22294,19 +22841,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) {
   swap(a.__isset, b.__isset);
 }
-WMResourcePlan::WMResourcePlan(const WMResourcePlan& other926) {
-  name = other926.name;
-  status = other926.status;
-  queryParallelism = other926.queryParallelism;
-  defaultPoolPath = other926.defaultPoolPath;
-  __isset = other926.__isset;
+WMResourcePlan::WMResourcePlan(const WMResourcePlan& other946) {
+  name = other946.name;
+  status = other946.status;
+  queryParallelism = other946.queryParallelism;
+  defaultPoolPath = other946.defaultPoolPath;
+  __isset = other946.__isset;
 }
-WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other927) {
-  name = other927.name;
-  status = other927.status;
-  queryParallelism = other927.queryParallelism;
-  defaultPoolPath = other927.defaultPoolPath;
-  __isset = other927.__isset;
+WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other947) {
+  name = other947.name;
+  status = other947.status;
+  queryParallelism = other947.queryParallelism;
+  defaultPoolPath = other947.defaultPoolPath;
+  __isset = other947.__isset;
   return *this;
 }
 void WMResourcePlan::printTo(std::ostream& out) const {
@@ -22385,9 +22932,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast928;
-          xfer += iprot->readI32(ecast928);
-          this->status = (WMResourcePlanStatus::type)ecast928;
+          int32_t ecast948;
+          xfer += iprot->readI32(ecast948);
+          this->status = (WMResourcePlanStatus::type)ecast948;
          this->__isset.status = true;
        } else {
          xfer += iprot->skip(ftype);
@@ -22489,23 +23036,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) {
   swap(a.__isset, b.__isset);
 }
-WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other929) {
-  name = other929.name;
-  status = other929.status;
-  queryParallelism = other929.queryParallelism;
-  isSetQueryParallelism = other929.isSetQueryParallelism;
-  defaultPoolPath = other929.defaultPoolPath;
-  isSetDefaultPoolPath = other929.isSetDefaultPoolPath;
-  __isset = other929.__isset;
-}
-WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other930) {
-  name = other930.name;
-  status = other930.status;
-  queryParallelism = other930.queryParallelism;
-  isSetQueryParallelism = other930.isSetQueryParallelism;
-  defaultPoolPath = other930.defaultPoolPath;
-  isSetDefaultPoolPath = other930.isSetDefaultPoolPath;
-  __isset = other930.__isset;
+WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other949) {
+  name = other949.name;
+  status = other949.status;
+  queryParallelism = other949.queryParallelism;
+  isSetQueryParallelism = other949.isSetQueryParallelism;
+  defaultPoolPath = other949.defaultPoolPath;
+  isSetDefaultPoolPath = other949.isSetDefaultPoolPath;
+  __isset = other949.__isset;
+}
+WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other950) {
+  name = other950.name;
+  status = other950.status;
+  queryParallelism = other950.queryParallelism;
+  isSetQueryParallelism = other950.isSetQueryParallelism;
+  defaultPoolPath = other950.defaultPoolPath;
+  isSetDefaultPoolPath = other950.isSetDefaultPoolPath;
+  __isset = other950.__isset;
   return *this;
 }
 void WMNullableResourcePlan::printTo(std::ostream& out) const {
@@ -22670,21 +23217,21 @@ void swap(WMPool &a, WMPool &b) {
   swap(a.__isset, b.__isset);
 }
-WMPool::WMPool(const WMPool& other931) {
-  resourcePlanName = other931.resourcePlanName;
-  poolPath = other931.poolPath;
-  allocFraction = other931.allocFraction;
-  queryParallelism = other931.queryParallelism;
-  schedulingPolicy = other931.schedulingPolicy;
-  __isset = other931.__isset;
-}
-WMPool& WMPool::operator=(const WMPool& other932) {
-  resourcePlanName = other932.resourcePlanName;
-  poolPath = other932.poolPath;
-  allocFraction = other932.allocFraction;
-  queryParallelism = other932.queryParallelism;
-  schedulingPolicy = other932.schedulingPolicy;
-  __isset = other932.__isset;
+WMPool::WMPool(const WMPool& other951) {
+  resourcePlanName = other951.resourcePlanName;
+  poolPath = other951.poolPath;
+  allocFraction = other951.allocFraction;
+  queryParallelism = other951.queryParallelism;
+  schedulingPolicy = other951.schedulingPolicy;
+  __isset = other951.__isset;
+}
+WMPool& WMPool::operator=(const WMPool& other952) {
+  resourcePlanName = other952.resourcePlanName;
+  poolPath = other952.poolPath;
+  allocFraction = other952.allocFraction;
+  queryParallelism = other952.queryParallelism;
+  schedulingPolicy = other952.schedulingPolicy;
+  __isset = other952.__isset;
   return *this;
 }
 void WMPool::printTo(std::ostream& out) const {
@@ -22867,23 +23414,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) {
   swap(a.__isset, b.__isset);
 }
-WMNullablePool::WMNullablePool(const WMNullablePool& other933) {
-  resourcePlanName = other933.resourcePlanName;
-  poolPath = other933.poolPath;
-  allocFraction = other933.allocFraction;
-  queryParallelism = other933.queryParallelism;
-  schedulingPolicy = other933.schedulingPolicy;
-  isSetSchedulingPolicy = other933.isSetSchedulingPolicy;
-  __isset = other933.__isset;
-}
-WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other934) {
-  resourcePlanName = other934.resourcePlanName;
-  poolPath = other934.poolPath;
-  allocFraction = other934.allocFraction;
-  queryParallelism = other934.queryParallelism;
-  schedulingPolicy = other934.schedulingPolicy;
-  isSetSchedulingPolicy = other934.isSetSchedulingPolicy;
-  __isset = other934.__isset;
+WMNullablePool::WMNullablePool(const WMNullablePool& other953) {
+  resourcePlanName = other953.resourcePlanName;
+  poolPath = other953.poolPath;
+  allocFraction = other953.allocFraction;
+  queryParallelism = other953.queryParallelism;
+  schedulingPolicy = other953.schedulingPolicy;
+  isSetSchedulingPolicy = other953.isSetSchedulingPolicy;
+  __isset = other953.__isset;
+}
+WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other954) {
+  resourcePlanName = other954.resourcePlanName;
+  poolPath = other954.poolPath;
+  allocFraction = other954.allocFraction;
+  queryParallelism = other954.queryParallelism;
+  schedulingPolicy = other954.schedulingPolicy;
+  isSetSchedulingPolicy = other954.isSetSchedulingPolicy;
+  __isset = other954.__isset;
   return *this;
 }
 void WMNullablePool::printTo(std::ostream& out) const {
@@ -23048,21 +23595,21 @@ void swap(WMTrigger &a, WMTrigger &b) {
   swap(a.__isset, b.__isset);
 }
-WMTrigger::WMTrigger(const WMTrigger& other935) {
-  resourcePlanName = other935.resourcePlanName;
-  triggerName = other935.triggerName;
-  triggerExpression = other935.triggerExpression;
-  actionExpression = other935.actionExpression;
-  isInUnmanaged = other935.isInUnmanaged;
-  __isset = other935.__isset;
-}
-WMTrigger& WMTrigger::operator=(const WMTrigger& other936) {
-  resourcePlanName = other936.resourcePlanName;
-  triggerName = other936.triggerName;
-  triggerExpression = other936.triggerExpression;
-  actionExpression = other936.actionExpression;
-  isInUnmanaged = other936.isInUnmanaged;
-  __isset = other936.__isset;
+WMTrigger::WMTrigger(const WMTrigger& other955) {
+  resourcePlanName = other955.resourcePlanName;
+  triggerName = other955.triggerName;
+  triggerExpression = other955.triggerExpression;
+  actionExpression = other955.actionExpression;
+  isInUnmanaged = other955.isInUnmanaged;
+  __isset = other955.__isset;
+}
+WMTrigger& WMTrigger::operator=(const WMTrigger& other956) {
+  resourcePlanName = other956.resourcePlanName;
+  triggerName = other956.triggerName;
+  triggerExpression = other956.triggerExpression;
+  actionExpression = other956.actionExpression;
+  isInUnmanaged = other956.isInUnmanaged;
+  __isset = other956.__isset;
   return *this;
 }
 void WMTrigger::printTo(std::ostream& out) const {
@@ -23227,21 +23774,21 @@ void swap(WMMapping &a, WMMapping &b) {
   swap(a.__isset, b.__isset);
 }
-WMMapping::WMMapping(const WMMapping& other937) {
-  resourcePlanName = other937.resourcePlanName;
-  entityType = other937.entityType;
-  entityName = other937.entityName;
-  poolPath = other937.poolPath;
-  ordering = other937.ordering;
-  __isset = other937.__isset;
-}
-WMMapping& WMMapping::operator=(const WMMapping& other938) {
-  resourcePlanName = other938.resourcePlanName;
-  entityType = other938.entityType;
-  entityName = other938.entityName;
-  poolPath = other938.poolPath;
-  ordering = other938.ordering;
-  __isset = other938.__isset;
+WMMapping::WMMapping(const WMMapping& other957) {
+  resourcePlanName = other957.resourcePlanName;
+  entityType = other957.entityType;
+  entityName = other957.entityName;
+  poolPath = other957.poolPath;
+  ordering = other957.ordering;
+  __isset = other957.__isset;
+}
+WMMapping& WMMapping::operator=(const WMMapping& other958) {
+  resourcePlanName = other958.resourcePlanName;
+  entityType = other958.entityType;
+  entityName = other958.entityName;
+  poolPath = other958.poolPath;
+  ordering = other958.ordering;
+  __isset = other958.__isset;
   return *this;
 }
 void WMMapping::printTo(std::ostream& out) const {
@@ -23347,13 +23894,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) {
   swap(a.trigger, b.trigger);
 }
-WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other939) {
-  pool = other939.pool;
-  trigger = other939.trigger;
+WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other959) {
+  pool = other959.pool;
+  trigger = other959.trigger;
 }
-WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other940) {
-  pool = other940.pool;
-  trigger = other940.trigger;
+WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other960) {
+  pool = other960.pool;
+  trigger = other960.trigger;
   return *this;
 }
 void WMPoolTrigger::printTo(std::ostream& out) const {
@@ -23427,14 +23974,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->pools.clear();
-            uint32_t _size941;
-            ::apache::thrift::protocol::TType _etype944;
-            xfer += iprot->readListBegin(_etype944, _size941);
-            this->pools.resize(_size941);
-            uint32_t _i945;
-            for (_i945 = 0; _i945 < _size941; ++_i945)
+            uint32_t _size961;
+            ::apache::thrift::protocol::TType _etype964;
+            xfer += iprot->readListBegin(_etype964, _size961);
+            this->pools.resize(_size961);
+            uint32_t _i965;
+            for (_i965 = 0; _i965 < _size961; ++_i965)
            {
-              xfer += this->pools[_i945].read(iprot);
+              xfer += this->pools[_i965].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -23447,14 +23994,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->mappings.clear();
-            uint32_t _size946;
-            ::apache::thrift::protocol::TType _etype949;
-            xfer += iprot->readListBegin(_etype949, _size946);
-            this->mappings.resize(_size946);
-            uint32_t _i950;
-            for (_i950 = 0; _i950 < _size946; ++_i950)
+            uint32_t _size966;
+            ::apache::thrift::protocol::TType _etype969;
+            xfer += iprot->readListBegin(_etype969, _size966);
+            this->mappings.resize(_size966);
+            uint32_t _i970;
+            for (_i970 = 0; _i970 < _size966; ++_i970)
            {
-              xfer += this->mappings[_i950].read(iprot);
+              xfer += this->mappings[_i970].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -23467,14 +24014,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->triggers.clear();
-            uint32_t _size951;
-            ::apache::thrift::protocol::TType _etype954;
-            xfer += iprot->readListBegin(_etype954, _size951);
-            this->triggers.resize(_size951);
-            uint32_t _i955;
-            for (_i955 = 0; _i955 < _size951; ++_i955)
+            uint32_t _size971;
+            ::apache::thrift::protocol::TType _etype974;
+            xfer += iprot->readListBegin(_etype974, _size971);
+            this->triggers.resize(_size971);
+            uint32_t _i975;
+            for (_i975 = 0; _i975 < _size971; ++_i975)
            {
-              xfer += this->triggers[_i955].read(iprot);
+              xfer += this->triggers[_i975].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -23487,14 +24034,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->poolTriggers.clear();
-            uint32_t _size956;
-            ::apache::thrift::protocol::TType _etype959;
-            xfer += iprot->readListBegin(_etype959, _size956);
-            this->poolTriggers.resize(_size956);
-            uint32_t _i960;
-            for (_i960 = 0; _i960 < _size956; ++_i960)
+            uint32_t _size976;
+            ::apache::thrift::protocol::TType _etype979;
+            xfer += iprot->readListBegin(_etype979, _size976);
+            this->poolTriggers.resize(_size976);
+            uint32_t _i980;
+            for (_i980 = 0; _i980 < _size976; ++_i980)
            {
-              xfer += this->poolTriggers[_i960].read(iprot);
+              xfer += this->poolTriggers[_i980].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -23531,10 +24078,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->pools.size()));
-    std::vector<WMPool> ::const_iterator _iter961;
-    for (_iter961 = this->pools.begin(); _iter961 != this->pools.end(); ++_iter961)
+    std::vector<WMPool> ::const_iterator _iter981;
+    for (_iter981 = this->pools.begin(); _iter981 != this->pools.end(); ++_iter981)
    {
-      xfer += (*_iter961).write(oprot);
+      xfer += (*_iter981).write(oprot);
    }
     xfer += oprot->writeListEnd();
   }
@@ -23544,10 +24091,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->mappings.size()));
-    std::vector<WMMapping> ::const_iterator _iter962;
-    for (_iter962 = this->mappings.begin(); _iter962 != this->mappings.end(); ++_iter962)
+    std::vector<WMMapping> ::const_iterator _iter982;
+    for (_iter982 = this->mappings.begin(); _iter982 != this->mappings.end(); ++_iter982)
    {
-      xfer += (*_iter962).write(oprot);
+      xfer += (*_iter982).write(oprot);
    }
     xfer += oprot->writeListEnd();
   }
@@ -23557,10 +24104,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size()));
-    std::vector<WMTrigger> ::const_iterator _iter963;
-    for (_iter963 = this->triggers.begin(); _iter963 != this->triggers.end(); ++_iter963)
+    std::vector<WMTrigger> ::const_iterator _iter983;
+    for (_iter983 = this->triggers.begin(); _iter983 != this->triggers.end(); ++_iter983)
    {
-      xfer += (*_iter963).write(oprot);
+      xfer += (*_iter983).write(oprot);
    }
     xfer += oprot->writeListEnd();
   }
@@ -23570,10 +24117,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->poolTriggers.size()));
-    std::vector<WMPoolTrigger> ::const_iterator _iter964;
-    for (_iter964 = this->poolTriggers.begin(); _iter964 != this->poolTriggers.end(); ++_iter964)
+    std::vector<WMPoolTrigger> ::const_iterator _iter984;
+    for (_iter984 = this->poolTriggers.begin(); _iter984 != this->poolTriggers.end(); ++_iter984)
    {
-      xfer += (*_iter964).write(oprot);
+      xfer += (*_iter984).write(oprot);
    }
     xfer += oprot->writeListEnd();
   }
@@ -23594,21 +24141,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) {
   swap(a.__isset, b.__isset);
 }
-WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other965) {
-  plan = other965.plan;
-  pools = other965.pools;
-  mappings = other965.mappings;
-  triggers = other965.triggers;
-  poolTriggers = other965.poolTriggers;
-  __isset = other965.__isset;
-}
-WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other966) {
-  plan = other966.plan;
-  pools = other966.pools;
-  mappings = other966.mappings;
-  triggers = other966.triggers;
-  poolTriggers = other966.poolTriggers;
-  __isset = other966.__isset;
+WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other985) {
+  plan = other985.plan;
+  pools = other985.pools;
+  mappings = other985.mappings;
+  triggers = other985.triggers;
+  poolTriggers = other985.poolTriggers;
+  __isset = other985.__isset;
+}
+WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other986) {
+  plan = other986.plan;
+  pools = other986.pools;
+  mappings = other986.mappings;
+  triggers = other986.triggers;
+  poolTriggers = other986.poolTriggers;
+  __isset = other986.__isset;
   return *this;
 }
 void WMFullResourcePlan::printTo(std::ostream& out) const {
@@ -23713,15 +24260,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other967) {
-  resourcePlan = other967.resourcePlan;
-  copyFrom = other967.copyFrom;
-  __isset = other967.__isset;
+WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other987) {
+  resourcePlan = other987.resourcePlan;
+  copyFrom = other987.copyFrom;
+  __isset = other987.__isset;
 }
-WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other968) {
-  resourcePlan = other968.resourcePlan;
-  copyFrom = other968.copyFrom;
-  __isset = other968.__isset;
+WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other988) {
+  resourcePlan = other988.resourcePlan;
+  copyFrom = other988.copyFrom;
+  __isset = other988.__isset;
   return *this;
 }
 void WMCreateResourcePlanRequest::printTo(std::ostream& out) const {
@@ -23781,11 +24328,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) {
   (void) b;
 }
-WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other969) {
-  (void) other969;
+WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other989) {
+  (void) other989;
 }
-WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other970) {
-  (void) other970;
+WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other990) {
+  (void) other990;
   return *this;
 }
 void WMCreateResourcePlanResponse::printTo(std::ostream& out) const {
@@ -23843,11 +24390,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b)
   (void) b;
 }
-WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other971) {
-  (void) other971;
+WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other991) {
+  (void) other991;
 }
-WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other972) {
-  (void) other972;
+WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other992) {
+  (void) other992;
   return *this;
 }
 void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const {
@@ -23928,13 +24475,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b
   swap(a.__isset, b.__isset);
 }
-WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other973) {
-  resourcePlan = other973.resourcePlan;
-  __isset = other973.__isset;
+WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other993) {
+  resourcePlan = other993.resourcePlan;
+  __isset = other993.__isset;
 }
-WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other974) {
-  resourcePlan = other974.resourcePlan;
-  __isset = other974.__isset;
+WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other994) {
+  resourcePlan = other994.resourcePlan;
+  __isset = other994.__isset;
   return *this;
 }
 void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const {
@@ -24016,13 +24563,13 @@ void swap(WMGetResourcePlanRequest &a, WMGetResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other975) {
-  resourcePlanName = other975.resourcePlanName;
-  __isset = other975.__isset;
+WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other995) {
+  resourcePlanName = other995.resourcePlanName;
+  __isset = other995.__isset;
 }
-WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other976) {
-  resourcePlanName = other976.resourcePlanName;
-  __isset = other976.__isset;
+WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other996) {
+  resourcePlanName = other996.resourcePlanName;
+  __isset = other996.__isset;
   return *this;
 }
 void WMGetResourcePlanRequest::printTo(std::ostream& out) const {
@@ -24104,13 +24651,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) {
   swap(a.__isset, b.__isset);
 }
-WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other977) {
-  resourcePlan = other977.resourcePlan;
-  __isset = other977.__isset;
+WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other997) {
+  resourcePlan = other997.resourcePlan;
+  __isset = other997.__isset;
 }
-WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other978) {
-  resourcePlan = other978.resourcePlan;
-  __isset = other978.__isset;
+WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other998) {
+  resourcePlan = other998.resourcePlan;
+  __isset = other998.__isset;
   return *this;
 }
 void WMGetResourcePlanResponse::printTo(std::ostream& out) const {
@@ -24169,11 +24716,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) {
   (void) b;
 }
-WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other979) {
-  (void) other979;
+WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other999) {
+  (void) other999;
 }
-WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other980) {
-  (void) other980;
+WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1000) {
+  (void) other1000;
   return *this;
 }
 void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const {
@@ -24217,14 +24764,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->resourcePlans.clear();
-            uint32_t _size981;
-            ::apache::thrift::protocol::TType _etype984;
-            xfer += iprot->readListBegin(_etype984, _size981);
-            this->resourcePlans.resize(_size981);
-            uint32_t _i985;
-            for (_i985 = 0; _i985 < _size981; ++_i985)
+            uint32_t _size1001;
+            ::apache::thrift::protocol::TType _etype1004;
+            xfer += iprot->readListBegin(_etype1004, _size1001);
+            this->resourcePlans.resize(_size1001);
+            uint32_t _i1005;
+            for (_i1005 = 0; _i1005 < _size1001; ++_i1005)
            {
-              xfer += this->resourcePlans[_i985].read(iprot);
+              xfer += this->resourcePlans[_i1005].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -24254,10 +24801,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc
   xfer += oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourcePlans.size()));
-    std::vector<WMResourcePlan> ::const_iterator _iter986;
-    for (_iter986 = this->resourcePlans.begin(); _iter986 != this->resourcePlans.end(); ++_iter986)
+    std::vector<WMResourcePlan> ::const_iterator _iter1006;
+    for (_iter1006 = this->resourcePlans.begin(); _iter1006 != this->resourcePlans.end(); ++_iter1006)
    {
-      xfer += (*_iter986).write(oprot);
+      xfer += (*_iter1006).write(oprot);
    }
     xfer += oprot->writeListEnd();
   }
@@ -24274,13 +24821,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) {
   swap(a.__isset, b.__isset);
 }
-WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other987) {
-  resourcePlans = other987.resourcePlans;
-  __isset = other987.__isset;
+WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1007) {
+  resourcePlans = other1007.resourcePlans;
+  __isset = other1007.__isset;
 }
-WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other988) {
-  resourcePlans = other988.resourcePlans;
-  __isset = other988.__isset;
+WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1008) {
+  resourcePlans = other1008.resourcePlans;
+  __isset = other1008.__isset;
   return *this;
 }
 void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const {
@@ -24438,21 +24985,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other989) {
-  resourcePlanName = other989.resourcePlanName;
-  resourcePlan = other989.resourcePlan;
-  isEnableAndActivate = other989.isEnableAndActivate;
-  isForceDeactivate = other989.isForceDeactivate;
-  isReplace = other989.isReplace;
-  __isset = other989.__isset;
-}
-WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other990) {
-  resourcePlanName = other990.resourcePlanName;
-  resourcePlan = other990.resourcePlan;
-  isEnableAndActivate = other990.isEnableAndActivate;
-  isForceDeactivate = other990.isForceDeactivate;
-  isReplace = other990.isReplace;
-  __isset = other990.__isset;
+WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1009) {
+  resourcePlanName = other1009.resourcePlanName;
+  resourcePlan = other1009.resourcePlan;
+  isEnableAndActivate = other1009.isEnableAndActivate;
+  isForceDeactivate = other1009.isForceDeactivate;
+  isReplace = other1009.isReplace;
+  __isset = other1009.__isset;
+}
+WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1010) {
+  resourcePlanName = other1010.resourcePlanName;
+  resourcePlan = other1010.resourcePlan;
+  isEnableAndActivate = other1010.isEnableAndActivate;
+  isForceDeactivate = other1010.isForceDeactivate;
+  isReplace = other1010.isReplace;
+  __isset = other1010.__isset;
   return *this;
 }
 void WMAlterResourcePlanRequest::printTo(std::ostream& out) const {
@@ -24538,13 +25085,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) {
   swap(a.__isset, b.__isset);
 }
-WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other991) {
-  fullResourcePlan = other991.fullResourcePlan;
-  __isset = other991.__isset;
+WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1011) {
+  fullResourcePlan = other1011.fullResourcePlan;
+  __isset = other1011.__isset;
 }
-WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other992) {
-  fullResourcePlan = other992.fullResourcePlan;
-  __isset = other992.__isset;
+WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1012) {
+  fullResourcePlan = other1012.fullResourcePlan;
+  __isset = other1012.__isset;
   return *this;
 }
 void WMAlterResourcePlanResponse::printTo(std::ostream& out) const {
@@ -24626,13 +25173,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other993) {
-  resourcePlanName = other993.resourcePlanName;
-  __isset = other993.__isset;
+WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1013) {
+  resourcePlanName = other1013.resourcePlanName;
+  __isset = other1013.__isset;
 }
-WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other994) {
-  resourcePlanName = other994.resourcePlanName;
-  __isset = other994.__isset;
+WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1014) {
+  resourcePlanName = other1014.resourcePlanName;
+  __isset = other1014.__isset;
   return *this;
 }
 void WMValidateResourcePlanRequest::printTo(std::ostream& out) const {
@@ -24682,14 +25229,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->errors.clear();
-            uint32_t _size995;
-            ::apache::thrift::protocol::TType _etype998;
-            xfer += iprot->readListBegin(_etype998, _size995);
-            this->errors.resize(_size995);
-            uint32_t _i999;
-            for (_i999 = 0; _i999 < _size995; ++_i999)
+            uint32_t _size1015;
+            ::apache::thrift::protocol::TType _etype1018;
+            xfer += iprot->readListBegin(_etype1018, _size1015);
+            this->errors.resize(_size1015);
+            uint32_t _i1019;
+            for (_i1019 = 0; _i1019 < _size1015; ++_i1019)
            {
-              xfer += iprot->readString(this->errors[_i999]);
+              xfer += iprot->readString(this->errors[_i1019]);
            }
            xfer += iprot->readListEnd();
          }
@@ -24702,14 +25249,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->warnings.clear();
-            uint32_t _size1000;
-            ::apache::thrift::protocol::TType _etype1003;
-            xfer += iprot->readListBegin(_etype1003, _size1000);
-            this->warnings.resize(_size1000);
-            uint32_t _i1004;
-            for (_i1004 = 0; _i1004 < _size1000; ++_i1004)
+            uint32_t _size1020;
+            ::apache::thrift::protocol::TType _etype1023;
+            xfer += iprot->readListBegin(_etype1023, _size1020);
+            this->warnings.resize(_size1020);
+            uint32_t _i1024;
+            for (_i1024 = 0; _i1024 < _size1020; ++_i1024)
            {
-              xfer += iprot->readString(this->warnings[_i1004]);
+              xfer += iprot->readString(this->warnings[_i1024]);
            }
            xfer += iprot->readListEnd();
          }
@@ -24739,10 +25286,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt
   xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->errors.size()));
-    std::vector<std::string> ::const_iterator _iter1005;
-    for (_iter1005 = this->errors.begin(); _iter1005 != this->errors.end(); ++_iter1005)
+    std::vector<std::string> ::const_iterator _iter1025;
+    for (_iter1025 = this->errors.begin(); _iter1025 != this->errors.end(); ++_iter1025)
    {
-      xfer += oprot->writeString((*_iter1005));
+      xfer += oprot->writeString((*_iter1025));
    }
     xfer += oprot->writeListEnd();
   }
@@ -24752,10 +25299,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt
   xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->warnings.size()));
-    std::vector<std::string> ::const_iterator _iter1006;
-    for (_iter1006 = this->warnings.begin(); _iter1006 != this->warnings.end(); ++_iter1006)
+    std::vector<std::string> ::const_iterator _iter1026;
+    for (_iter1026 = this->warnings.begin(); _iter1026 != this->warnings.end(); ++_iter1026)
    {
-      xfer += oprot->writeString((*_iter1006));
+      xfer += oprot->writeString((*_iter1026));
    }
     xfer += oprot->writeListEnd();
   }
@@ -24773,15 +25320,15 @@ void swap(WMValidateResourcePlanResponse &a, WMValidateResourcePlanResponse &b)
   swap(a.__isset, b.__isset);
 }
-WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1007) {
-  errors = other1007.errors;
-  warnings = other1007.warnings;
-  __isset = other1007.__isset;
+WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1027) {
+  errors = other1027.errors;
+  warnings = other1027.warnings;
+  __isset = other1027.__isset;
 }
-WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1008) {
-  errors = other1008.errors;
-  warnings = other1008.warnings;
-  __isset = other1008.__isset;
+WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1028) {
+  errors = other1028.errors;
+  warnings = other1028.warnings;
+  __isset = other1028.__isset;
   return *this;
 }
 void WMValidateResourcePlanResponse::printTo(std::ostream& out) const {
@@ -24864,13 +25411,13 @@ void swap(WMDropResourcePlanRequest &a, WMDropResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1009) {
-  resourcePlanName = other1009.resourcePlanName;
-  __isset = other1009.__isset;
+WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1029) {
+  resourcePlanName = other1029.resourcePlanName;
+  __isset = other1029.__isset;
 }
-WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1010) {
-  resourcePlanName = other1010.resourcePlanName;
-  __isset = other1010.__isset;
+WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1030) {
+  resourcePlanName = other1030.resourcePlanName;
+  __isset = other1030.__isset;
   return *this;
 }
 void WMDropResourcePlanRequest::printTo(std::ostream& out) const {
@@ -24929,11 +25476,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) {
   (void) b;
 }
-WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1011) {
-  (void) other1011;
+WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1031) {
+  (void) other1031;
 }
-WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1012) {
-  (void) other1012;
+WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1032) {
+  (void) other1032;
   return *this;
 }
 void WMDropResourcePlanResponse::printTo(std::ostream& out) const {
@@ -25014,13 +25561,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1013) {
-  trigger = other1013.trigger;
-  __isset = other1013.__isset;
+WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1033) {
+  trigger = other1033.trigger;
+  __isset = other1033.__isset;
 }
-WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1014) {
-  trigger = other1014.trigger;
-  __isset = other1014.__isset;
+WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1034) {
+  trigger = other1034.trigger;
+  __isset = other1034.__isset;
   return *this;
 }
 void WMCreateTriggerRequest::printTo(std::ostream& out) const {
@@ -25079,11 +25626,11 @@ void swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) {
   (void) b;
 }
-WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1015) {
-  (void) other1015;
+WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1035) {
+  (void) other1035;
 }
-WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1016) {
-  (void) other1016;
+WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1036) {
+  (void) other1036;
   return *this;
 }
 void WMCreateTriggerResponse::printTo(std::ostream& out) const {
@@ -25164,13 +25711,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1017) {
-  trigger = other1017.trigger;
-  __isset = other1017.__isset;
+WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1037) {
+  trigger = other1037.trigger;
+  __isset = other1037.__isset;
 }
-WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1018) {
-  trigger = other1018.trigger;
-  __isset = other1018.__isset;
+WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1038) {
+  trigger = other1038.trigger;
+  __isset = other1038.__isset;
   return *this;
 }
 void WMAlterTriggerRequest::printTo(std::ostream& out) const {
@@ -25229,11 +25776,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) {
   (void) b;
 }
-WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1019) {
-  (void) other1019;
+WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1039) {
+  (void) other1039;
 }
-WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1020) {
-  (void) other1020;
+WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1040) {
+  (void) other1040;
   return *this;
 }
 void WMAlterTriggerResponse::printTo(std::ostream& out) const {
@@ -25333,15 +25880,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) {
   swap(a.__isset, b.__isset);
 }
-WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1021) {
-  resourcePlanName = other1021.resourcePlanName;
-  triggerName = other1021.triggerName;
-  __isset = other1021.__isset;
+WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1041) {
+  resourcePlanName = other1041.resourcePlanName;
+  triggerName = other1041.triggerName;
+  __isset = other1041.__isset;
 }
-WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1022) {
-  resourcePlanName = other1022.resourcePlanName;
-  triggerName = other1022.triggerName;
-  __isset = other1022.__isset;
+WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1042) {
+  resourcePlanName = other1042.resourcePlanName;
+  triggerName = other1042.triggerName;
+  __isset = other1042.__isset;
   return *this;
 }
 void WMDropTriggerRequest::printTo(std::ostream& out) const {
@@ -25401,11 +25948,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) {
   (void) b;
 }
-WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1023) {
-  (void) other1023;
+WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1043) {
+  (void) other1043;
 }
-WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1024) {
-  (void) other1024;
+WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1044) {
+  (void) other1044;
   return *this;
 }
 void WMDropTriggerResponse::printTo(std::ostream& out) const {
@@ -25486,13 +26033,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ
   swap(a.__isset, b.__isset);
 }
-WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1025) {
-  resourcePlanName = other1025.resourcePlanName;
-  __isset = other1025.__isset;
+WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1045) {
+  resourcePlanName = other1045.resourcePlanName;
+  __isset = other1045.__isset;
 }
-WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const 
WMGetTriggersForResourePlanRequest& other1026) { - resourcePlanName = other1026.resourcePlanName; - __isset = other1026.__isset; +WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1046) { + resourcePlanName = other1046.resourcePlanName; + __isset = other1046.__isset; return *this; } void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const { @@ -25537,14 +26084,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size1027; - ::apache::thrift::protocol::TType _etype1030; - xfer += iprot->readListBegin(_etype1030, _size1027); - this->triggers.resize(_size1027); - uint32_t _i1031; - for (_i1031 = 0; _i1031 < _size1027; ++_i1031) + uint32_t _size1047; + ::apache::thrift::protocol::TType _etype1050; + xfer += iprot->readListBegin(_etype1050, _size1047); + this->triggers.resize(_size1047); + uint32_t _i1051; + for (_i1051 = 0; _i1051 < _size1047; ++_i1051) { - xfer += this->triggers[_i1031].read(iprot); + xfer += this->triggers[_i1051].read(iprot); } xfer += iprot->readListEnd(); } @@ -25574,10 +26121,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol:: xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size())); - std::vector<WMTrigger> ::const_iterator _iter1032; - for (_iter1032 = this->triggers.begin(); _iter1032 != this->triggers.end(); ++_iter1032) + std::vector<WMTrigger> ::const_iterator _iter1052; + for (_iter1052 = this->triggers.begin(); _iter1052 != this->triggers.end(); ++_iter1052) { - xfer += (*_iter1032).write(oprot); + xfer += (*_iter1052).write(oprot); } xfer += oprot->writeListEnd(); } @@ -25594,13 +26141,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1033) { - triggers = other1033.triggers; - __isset = other1033.__isset; +WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1053) { + triggers = other1053.triggers; + __isset = other1053.__isset; } -WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1034) { - triggers = other1034.triggers; - __isset = other1034.__isset; +WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1054) { + triggers = other1054.triggers; + __isset = other1054.__isset; return *this; } void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { @@ -25682,13 +26229,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) { swap(a.__isset, b.__isset); } -WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1035) { - pool = other1035.pool; - __isset = other1035.__isset; +WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1055) { + pool = other1055.pool; + __isset = other1055.__isset; } -WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1036) { - pool = other1036.pool; - __isset = other1036.__isset; +WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1056) { +
pool = other1056.pool; + __isset = other1056.__isset; return *this; } void WMCreatePoolRequest::printTo(std::ostream& out) const { @@ -25747,11 +26294,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) { (void) b; } -WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1037) { - (void) other1037; +WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1057) { + (void) other1057; } -WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1038) { - (void) other1038; +WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1058) { + (void) other1058; return *this; } void WMCreatePoolResponse::printTo(std::ostream& out) const { @@ -25851,15 +26398,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) { swap(a.__isset, b.__isset); } -WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1039) { - pool = other1039.pool; - poolPath = other1039.poolPath; - __isset = other1039.__isset; +WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1059) { + pool = other1059.pool; + poolPath = other1059.poolPath; + __isset = other1059.__isset; } -WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1040) { - pool = other1040.pool; - poolPath = other1040.poolPath; - __isset = other1040.__isset; +WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1060) { + pool = other1060.pool; + poolPath = other1060.poolPath; + __isset = other1060.__isset; return *this; } void WMAlterPoolRequest::printTo(std::ostream& out) const { @@ -25919,11 +26466,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) { (void) b; } -WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1041) { - (void) other1041; +WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1061) { + (void) other1061; } -WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1042) { - (void) other1042; +WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1062) { + (void) other1062; return *this; } void WMAlterPoolResponse::printTo(std::ostream& out) const { @@ -26023,15 +26570,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { swap(a.__isset, b.__isset); } -WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1043) { - resourcePlanName = other1043.resourcePlanName; - poolPath = other1043.poolPath; - __isset = other1043.__isset; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1063) { + resourcePlanName = other1063.resourcePlanName; + poolPath = other1063.poolPath; + __isset = other1063.__isset; } -WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1044) { - resourcePlanName = other1044.resourcePlanName; - poolPath = other1044.poolPath; - __isset = other1044.__isset; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1064) { + resourcePlanName = other1064.resourcePlanName; + poolPath = other1064.poolPath; + __isset = other1064.__isset; return *this; } void WMDropPoolRequest::printTo(std::ostream& out) const { @@ -26091,11 +26638,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { (void) b; } -WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1045) { - (void) other1045; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1065) { + (void) other1065; } -WMDropPoolResponse& 
WMDropPoolResponse::operator=(const WMDropPoolResponse& other1046) { - (void) other1046; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1066) { + (void) other1066; return *this; } void WMDropPoolResponse::printTo(std::ostream& out) const { @@ -26195,15 +26742,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) swap(a.__isset, b.__isset); } -WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1047) { - mapping = other1047.mapping; - update = other1047.update; - __isset = other1047.__isset; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1067) { + mapping = other1067.mapping; + update = other1067.update; + __isset = other1067.__isset; } -WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1048) { - mapping = other1048.mapping; - update = other1048.update; - __isset = other1048.__isset; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1068) { + mapping = other1068.mapping; + update = other1068.update; + __isset = other1068.__isset; return *this; } void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { @@ -26263,11 +26810,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b (void) b; } -WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1049) { - (void) other1049; +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1069) { + (void) other1069; } -WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1050) { - (void) other1050; +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1070) { + (void) other1070; return *this; } void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { @@ -26348,13 +26895,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { swap(a.__isset, b.__isset); } -WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1051) { - mapping = other1051.mapping; - __isset = other1051.__isset; +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1071) { + mapping = other1071.mapping; + __isset = other1071.__isset; } -WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1052) { - mapping = other1052.mapping; - __isset = other1052.__isset; +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1072) { + mapping = other1072.mapping; + __isset = other1072.__isset; return *this; } void WMDropMappingRequest::printTo(std::ostream& out) const { @@ -26413,11 +26960,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { (void) b; } -WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1053) { - (void) other1053; +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1073) { + (void) other1073; } -WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1054) { - (void) other1054; +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1074) { + (void) other1074; return *this; } void 
WMDropMappingResponse::printTo(std::ostream& out) const { @@ -26555,19 +27102,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP swap(a.__isset, b.__isset); } -WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1055) { - resourcePlanName = other1055.resourcePlanName; - triggerName = other1055.triggerName; - poolPath = other1055.poolPath; - drop = other1055.drop; - __isset = other1055.__isset; +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1075) { + resourcePlanName = other1075.resourcePlanName; + triggerName = other1075.triggerName; + poolPath = other1075.poolPath; + drop = other1075.drop; + __isset = other1075.__isset; } -WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1056) { - resourcePlanName = other1056.resourcePlanName; - triggerName = other1056.triggerName; - poolPath = other1056.poolPath; - drop = other1056.drop; - __isset = other1056.__isset; +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1076) { + resourcePlanName = other1076.resourcePlanName; + triggerName = other1076.triggerName; + poolPath = other1076.poolPath; + drop = other1076.drop; + __isset = other1076.__isset; return *this; } void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { @@ -26629,11 +27176,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo (void) b; } -WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1057) { - (void) other1057; +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1077) { + (void) other1077; } -WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1058) { - (void) other1058; +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1078) { + (void) other1078; return *this; } void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { @@ -26712,13 +27259,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1059) : TException() { - message = other1059.message; - __isset = other1059.__isset; +MetaException::MetaException(const MetaException& other1079) : TException() { + message = other1079.message; + __isset = other1079.__isset; } -MetaException& MetaException::operator=(const MetaException& other1060) { - message = other1060.message; - __isset = other1060.__isset; +MetaException& MetaException::operator=(const MetaException& other1080) { + message = other1080.message; + __isset = other1080.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -26809,13 +27356,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1061) : TException() { - message = other1061.message; - __isset = 
other1061.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1081) : TException() { + message = other1081.message; + __isset = other1081.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1062) { - message = other1062.message; - __isset = other1062.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1082) { + message = other1082.message; + __isset = other1082.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -26906,13 +27453,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1063) : TException() { - message = other1063.message; - __isset = other1063.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1083) : TException() { + message = other1083.message; + __isset = other1083.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1064) { - message = other1064.message; - __isset = other1064.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1084) { + message = other1084.message; + __isset = other1084.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -27003,13 +27550,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1065) : TException() { - message = other1065.message; - __isset = other1065.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1085) : TException() { + message = other1085.message; + __isset = other1085.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1066) { - message = other1066.message; - __isset = other1066.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1086) { + message = other1086.message; + __isset = other1086.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -27100,13 +27647,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1067) : TException() { - message = other1067.message; - __isset = other1067.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1087) : TException() { + message = other1087.message; + __isset = other1087.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1068) { - message = other1068.message; - __isset = other1068.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1088) { + message = other1088.message; + __isset = other1088.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -27197,13 +27744,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1069) : TException() { - message = other1069.message; - __isset = other1069.__isset; +UnknownPartitionException::UnknownPartitionException(const 
UnknownPartitionException& other1089) : TException() { + message = other1089.message; + __isset = other1089.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1070) { - message = other1070.message; - __isset = other1070.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1090) { + message = other1090.message; + __isset = other1090.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -27294,13 +27841,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1071) : TException() { - message = other1071.message; - __isset = other1071.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1091) : TException() { + message = other1091.message; + __isset = other1091.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1072) { - message = other1072.message; - __isset = other1072.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1092) { + message = other1092.message; + __isset = other1092.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -27391,13 +27938,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1073) : TException() { - message = other1073.message; - __isset = other1073.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1093) : TException() { + message = other1093.message; + __isset = other1093.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1074) { - message = other1074.message; - __isset = other1074.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1094) { + message = other1094.message; + __isset = other1094.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -27488,13 +28035,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other1075) : TException() { - message = other1075.message; - __isset = other1075.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other1095) : TException() { + message = other1095.message; + __isset = other1095.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other1076) { - message = other1076.message; - __isset = other1076.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other1096) { + message = other1096.message; + __isset = other1096.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -27585,13 +28132,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1077) : TException() { - message = other1077.message; - __isset = other1077.__isset; 
+InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1097) : TException() { + message = other1097.message; + __isset = other1097.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1078) { - message = other1078.message; - __isset = other1078.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1098) { + message = other1098.message; + __isset = other1098.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -27682,13 +28229,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1079) : TException() { - message = other1079.message; - __isset = other1079.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1099) : TException() { + message = other1099.message; + __isset = other1099.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1080) { - message = other1080.message; - __isset = other1080.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1100) { + message = other1100.message; + __isset = other1100.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -27779,13 +28326,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1081) : TException() { - message = other1081.message; - __isset = other1081.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1101) : TException() { + message = other1101.message; + __isset = other1101.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1082) { - message = other1082.message; - __isset = other1082.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1102) { + message = other1102.message; + __isset = other1102.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -27876,13 +28423,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1083) : TException() { - message = other1083.message; - __isset = other1083.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1103) : TException() { + message = other1103.message; + __isset = other1103.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1084) { - message = other1084.message; - __isset = other1084.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1104) { + message = other1104.message; + __isset = other1104.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -27973,13 +28520,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1085) : TException() { - message = other1085.message; - __isset = other1085.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1105) : 
TException() { + message = other1105.message; + __isset = other1105.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1086) { - message = other1086.message; - __isset = other1086.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1106) { + message = other1106.message; + __isset = other1106.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -28070,13 +28617,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1087) : TException() { - message = other1087.message; - __isset = other1087.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1107) : TException() { + message = other1107.message; + __isset = other1107.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1088) { - message = other1088.message; - __isset = other1088.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1108) { + message = other1108.message; + __isset = other1108.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -28167,13 +28714,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1089) : TException() { - message = other1089.message; - __isset = other1089.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1109) : TException() { + message = other1109.message; + __isset = other1109.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1090) { - message = other1090.message; - __isset = other1090.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1110) { + message = other1110.message; + __isset = other1110.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 835cbb3308..7c38d5be74 100644 --- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -198,6 +198,8 @@ class SQLUniqueConstraint; class SQLNotNullConstraint; +class SQLDefaultConstraint; + class Type; class HiveObjectRef; @@ -304,6 +306,10 @@ class NotNullConstraintsRequest; class NotNullConstraintsResponse; +class DefaultConstraintsRequest; + +class DefaultConstraintsResponse; + class DropConstraintRequest; class AddPrimaryKeyRequest; @@ -314,6 +320,8 @@ class AddUniqueConstraintRequest; class AddNotNullConstraintRequest; +class AddDefaultConstraintRequest; + class PartitionsByExprResult; class PartitionsByExprRequest; @@ -1084,6 +1092,94 @@ inline std::ostream& operator<<(std::ostream& out, const SQLNotNullConstraint& o return out; } +typedef struct _SQLDefaultConstraint__isset { + _SQLDefaultConstraint__isset() : table_db(false), table_name(false), column_name(false), default_value(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool table_db :1; + bool table_name :1; + bool column_name :1; + bool default_value :1; + bool dc_name :1; + bool enable_cstr :1; + bool validate_cstr :1; + bool rely_cstr :1; +} _SQLDefaultConstraint__isset; + +class SQLDefaultConstraint { + public: + 
+ SQLDefaultConstraint(const SQLDefaultConstraint&); + SQLDefaultConstraint& operator=(const SQLDefaultConstraint&); + SQLDefaultConstraint() : table_db(), table_name(), column_name(), default_value(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + } + + virtual ~SQLDefaultConstraint() throw(); + std::string table_db; + std::string table_name; + std::string column_name; + std::string default_value; + std::string dc_name; + bool enable_cstr; + bool validate_cstr; + bool rely_cstr; + + _SQLDefaultConstraint__isset __isset; + + void __set_table_db(const std::string& val); + + void __set_table_name(const std::string& val); + + void __set_column_name(const std::string& val); + + void __set_default_value(const std::string& val); + + void __set_dc_name(const std::string& val); + + void __set_enable_cstr(const bool val); + + void __set_validate_cstr(const bool val); + + void __set_rely_cstr(const bool val); + + bool operator == (const SQLDefaultConstraint & rhs) const + { + if (!(table_db == rhs.table_db)) + return false; + if (!(table_name == rhs.table_name)) + return false; + if (!(column_name == rhs.column_name)) + return false; + if (!(default_value == rhs.default_value)) + return false; + if (!(dc_name == rhs.dc_name)) + return false; + if (!(enable_cstr == rhs.enable_cstr)) + return false; + if (!(validate_cstr == rhs.validate_cstr)) + return false; + if (!(rely_cstr == rhs.rely_cstr)) + return false; + return true; + } + bool operator != (const SQLDefaultConstraint &rhs) const { + return !(*this == rhs); + } + + bool operator < (const SQLDefaultConstraint & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b); + +inline std::ostream& operator<<(std::ostream& out, const SQLDefaultConstraint& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _Type__isset { _Type__isset() : name(false), type1(false), type2(false), fields(false) {} bool name :1; @@ -4346,6 +4442,91 @@ inline std::ostream& operator<<(std::ostream& out, const NotNullConstraintsRespo } +class DefaultConstraintsRequest { + public: + + DefaultConstraintsRequest(const DefaultConstraintsRequest&); + DefaultConstraintsRequest& operator=(const DefaultConstraintsRequest&); + DefaultConstraintsRequest() : db_name(), tbl_name() { + } + + virtual ~DefaultConstraintsRequest() throw(); + std::string db_name; + std::string tbl_name; + + void __set_db_name(const std::string& val); + + void __set_tbl_name(const std::string& val); + + bool operator == (const DefaultConstraintsRequest & rhs) const + { + if (!(db_name == rhs.db_name)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + return true; + } + bool operator != (const DefaultConstraintsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const DefaultConstraintsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(DefaultConstraintsRequest &a, DefaultConstraintsRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const DefaultConstraintsRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class DefaultConstraintsResponse { + public: + + DefaultConstraintsResponse(const DefaultConstraintsResponse&); + DefaultConstraintsResponse& 
operator=(const DefaultConstraintsResponse&); + DefaultConstraintsResponse() { + } + + virtual ~DefaultConstraintsResponse() throw(); + std::vector<SQLDefaultConstraint> defaultConstraints; + + void __set_defaultConstraints(const std::vector<SQLDefaultConstraint> & val); + + bool operator == (const DefaultConstraintsResponse & rhs) const + { + if (!(defaultConstraints == rhs.defaultConstraints)) + return false; + return true; + } + bool operator != (const DefaultConstraintsResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const DefaultConstraintsResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(DefaultConstraintsResponse &a, DefaultConstraintsResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const DefaultConstraintsResponse& obj) +{ + obj.printTo(out); + return out; +} + + class DropConstraintRequest { public: @@ -4556,6 +4737,46 @@ inline std::ostream& operator<<(std::ostream& out, const AddNotNullConstraintReq } +class AddDefaultConstraintRequest { + public: + + AddDefaultConstraintRequest(const AddDefaultConstraintRequest&); + AddDefaultConstraintRequest& operator=(const AddDefaultConstraintRequest&); + AddDefaultConstraintRequest() { + } + + virtual ~AddDefaultConstraintRequest() throw(); + std::vector<SQLDefaultConstraint> defaultConstraintCols; + + void __set_defaultConstraintCols(const std::vector<SQLDefaultConstraint> & val); + + bool operator == (const AddDefaultConstraintRequest & rhs) const + { + if (!(defaultConstraintCols == rhs.defaultConstraintCols)) + return false; + return true; + } + bool operator != (const AddDefaultConstraintRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const AddDefaultConstraintRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(AddDefaultConstraintRequest &a, AddDefaultConstraintRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const AddDefaultConstraintRequest& obj) +{ + obj.printTo(out); + return out; +} + + class PartitionsByExprResult { public: diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java index 0e5dbf7ae6..29e4e69cc4 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnsRequest st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); - struct.txn_ids = new ArrayList<Long>(_list548.size); - long _elem549; - for (int _i550 = 0; _i550 < _list548.size; ++_i550) + org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); + struct.txn_ids = new ArrayList<Long>(_list564.size); + long _elem565; + for (int _i566 = 0; _i566 < _list564.size; ++_i566) { - _elem549 = iprot.readI64(); - struct.txn_ids.add(_elem549); + _elem565 = iprot.readI64(); + struct.txn_ids.add(_elem565); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol
oprot, AbortTxnsRequest s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter551 : struct.txn_ids) + for (long _iter567 : struct.txn_ids) { - oprot.writeI64(_iter551); + oprot.writeI64(_iter567); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter552 : struct.txn_ids) + for (long _iter568 : struct.txn_ids) { - oprot.writeI64(_iter552); + oprot.writeI64(_iter568); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList<Long>(_list553.size); - long _elem554; - for (int _i555 = 0; _i555 < _list553.size; ++_i555) + org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList<Long>(_list569.size); + long _elem570; + for (int _i571 = 0; _i571 < _list569.size; ++_i571) { - _elem554 = iprot.readI64(); - struct.txn_ids.add(_elem554); + _elem570 = iprot.readI64(); + struct.txn_ids.add(_elem570); } } struct.setTxn_idsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java new file mode 100644 index 0000000000..8703b61cf1 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddDefaultConstraintRequest implements org.apache.thrift.TBase<AddDefaultConstraintRequest, AddDefaultConstraintRequest._Fields>, java.io.Serializable, Cloneable,
Comparable<AddDefaultConstraintRequest> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddDefaultConstraintRequest"); + + private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraintCols", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new AddDefaultConstraintRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new AddDefaultConstraintRequestTupleSchemeFactory()); + } + + private List<SQLDefaultConstraint> defaultConstraintCols; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DEFAULT_CONSTRAINT_COLS((short)1, "defaultConstraintCols"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DEFAULT_CONSTRAINT_COLS + return DEFAULT_CONSTRAINT_COLS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DEFAULT_CONSTRAINT_COLS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraintCols", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddDefaultConstraintRequest.class, metaDataMap); + } + + public AddDefaultConstraintRequest() { + } + + public AddDefaultConstraintRequest( + List<SQLDefaultConstraint> defaultConstraintCols) + { + this(); + this.defaultConstraintCols = defaultConstraintCols; + } + + /** + * Performs a deep copy on other.
+ */ + public AddDefaultConstraintRequest(AddDefaultConstraintRequest other) { + if (other.isSetDefaultConstraintCols()) { + List<SQLDefaultConstraint> __this__defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(other.defaultConstraintCols.size()); + for (SQLDefaultConstraint other_element : other.defaultConstraintCols) { + __this__defaultConstraintCols.add(new SQLDefaultConstraint(other_element)); + } + this.defaultConstraintCols = __this__defaultConstraintCols; + } + } + + public AddDefaultConstraintRequest deepCopy() { + return new AddDefaultConstraintRequest(this); + } + + @Override + public void clear() { + this.defaultConstraintCols = null; + } + + public int getDefaultConstraintColsSize() { + return (this.defaultConstraintCols == null) ? 0 : this.defaultConstraintCols.size(); + } + + public java.util.Iterator<SQLDefaultConstraint> getDefaultConstraintColsIterator() { + return (this.defaultConstraintCols == null) ? null : this.defaultConstraintCols.iterator(); + } + + public void addToDefaultConstraintCols(SQLDefaultConstraint elem) { + if (this.defaultConstraintCols == null) { + this.defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(); + } + this.defaultConstraintCols.add(elem); + } + + public List<SQLDefaultConstraint> getDefaultConstraintCols() { + return this.defaultConstraintCols; + } + + public void setDefaultConstraintCols(List<SQLDefaultConstraint> defaultConstraintCols) { + this.defaultConstraintCols = defaultConstraintCols; + } + + public void unsetDefaultConstraintCols() { + this.defaultConstraintCols = null; + } + + /** Returns true if field defaultConstraintCols is set (has been assigned a value) and false otherwise */ + public boolean isSetDefaultConstraintCols() { + return this.defaultConstraintCols != null; + } + + public void setDefaultConstraintColsIsSet(boolean value) { + if (!value) { + this.defaultConstraintCols = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DEFAULT_CONSTRAINT_COLS: + if (value == null) { + unsetDefaultConstraintCols(); + } else { + setDefaultConstraintCols((List<SQLDefaultConstraint>)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DEFAULT_CONSTRAINT_COLS: + return getDefaultConstraintCols(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DEFAULT_CONSTRAINT_COLS: + return isSetDefaultConstraintCols(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof AddDefaultConstraintRequest) + return this.equals((AddDefaultConstraintRequest)that); + return false; + } + + public boolean equals(AddDefaultConstraintRequest that) { + if (that == null) + return false; + + boolean this_present_defaultConstraintCols = true && this.isSetDefaultConstraintCols(); + boolean that_present_defaultConstraintCols = true && that.isSetDefaultConstraintCols(); + if (this_present_defaultConstraintCols || that_present_defaultConstraintCols) { + if (!(this_present_defaultConstraintCols && that_present_defaultConstraintCols)) + return false; + if (!this.defaultConstraintCols.equals(that.defaultConstraintCols)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List<Object> list = new ArrayList<Object>(); + + boolean present_defaultConstraintCols = true && (isSetDefaultConstraintCols()); + list.add(present_defaultConstraintCols); + if
(present_defaultConstraintCols) + list.add(defaultConstraintCols); + + return list.hashCode(); + } + + @Override + public int compareTo(AddDefaultConstraintRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDefaultConstraintCols()).compareTo(other.isSetDefaultConstraintCols()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDefaultConstraintCols()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultConstraintCols, other.defaultConstraintCols); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("AddDefaultConstraintRequest("); + boolean first = true; + + sb.append("defaultConstraintCols:"); + if (this.defaultConstraintCols == null) { + sb.append("null"); + } else { + sb.append(this.defaultConstraintCols); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDefaultConstraintCols()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'defaultConstraintCols' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class AddDefaultConstraintRequestStandardSchemeFactory implements SchemeFactory { + public AddDefaultConstraintRequestStandardScheme getScheme() { + return new AddDefaultConstraintRequestStandardScheme(); + } + } + + private static class AddDefaultConstraintRequestStandardScheme extends StandardScheme<AddDefaultConstraintRequest> { + + public void read(org.apache.thrift.protocol.TProtocol iprot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DEFAULT_CONSTRAINT_COLS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list394 = iprot.readListBegin(); + struct.defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(_list394.size); + SQLDefaultConstraint _elem395; + for (int _i396 = 0; _i396 < _list394.size; ++_i396) + { + _elem395 = new SQLDefaultConstraint(); +
_elem395.read(iprot); + struct.defaultConstraintCols.add(_elem395); + } + iprot.readListEnd(); + } + struct.setDefaultConstraintColsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.defaultConstraintCols != null) { + oprot.writeFieldBegin(DEFAULT_CONSTRAINT_COLS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraintCols.size())); + for (SQLDefaultConstraint _iter397 : struct.defaultConstraintCols) + { + _iter397.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class AddDefaultConstraintRequestTupleSchemeFactory implements SchemeFactory { + public AddDefaultConstraintRequestTupleScheme getScheme() { + return new AddDefaultConstraintRequestTupleScheme(); + } + } + + private static class AddDefaultConstraintRequestTupleScheme extends TupleScheme<AddDefaultConstraintRequest> { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.defaultConstraintCols.size()); + for (SQLDefaultConstraint _iter398 : struct.defaultConstraintCols) + { + _iter398.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(_list399.size); + SQLDefaultConstraint _elem400; + for (int _i401 = 0; _i401 < _list399.size; ++_i401) + { + _elem400 = new SQLDefaultConstraint(); + _elem400.read(iprot); + struct.defaultConstraintCols.add(_elem400); + } + } + struct.setDefaultConstraintColsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index a01dc2463c..99c0741871 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 5: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list646 = iprot.readListBegin(); - struct.partitionnames = new ArrayList<String>(_list646.size); - String _elem647; - for (int _i648 = 0; _i648 < _list646.size; ++_i648) + org.apache.thrift.protocol.TList _list662 = iprot.readListBegin(); + struct.partitionnames = new ArrayList<String>(_list662.size); + String _elem663; + for (int _i664 = 0; _i664 < _list662.size; ++_i664) { - _elem647
= iprot.readString(); - struct.partitionnames.add(_elem647); + _elem663 = iprot.readString(); + struct.partitionnames.add(_elem663); } iprot.readListEnd(); } @@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter649 : struct.partitionnames) + for (String _iter665 : struct.partitionnames) { - oprot.writeString(_iter649); + oprot.writeString(_iter665); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter650 : struct.partitionnames) + for (String _iter666 : struct.partitionnames) { - oprot.writeString(_iter650); + oprot.writeString(_iter666); } } BitSet optionals = new BitSet(); @@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list651.size); - String _elem652; - for (int _i653 = 0; _i653 < _list651.size; ++_i653) + org.apache.thrift.protocol.TList _list667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list667.size); + String _elem668; + for (int _i669 = 0; _i669 < _list667.size; ++_i669) { - _elem652 = iprot.readString(); - struct.partitionnames.add(_elem652); + _elem668 = iprot.readString(); + struct.partitionnames.add(_elem668); } } struct.setPartitionnamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java index c1c0dbf229..2bcdea1728 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddForeignKeyReques case 1: // FOREIGN_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list362 = iprot.readListBegin(); - struct.foreignKeyCols = new ArrayList(_list362.size); - SQLForeignKey _elem363; - for (int _i364 = 0; _i364 < _list362.size; ++_i364) + org.apache.thrift.protocol.TList _list370 = iprot.readListBegin(); + struct.foreignKeyCols = new ArrayList(_list370.size); + SQLForeignKey _elem371; + for (int _i372 = 0; _i372 < _list370.size; ++_i372) { - _elem363 = new SQLForeignKey(); - _elem363.read(iprot); - struct.foreignKeyCols.add(_elem363); + _elem371 = new SQLForeignKey(); + _elem371.read(iprot); + struct.foreignKeyCols.add(_elem371); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddForeignKeyReque oprot.writeFieldBegin(FOREIGN_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeyCols.size())); - for 
(SQLForeignKey _iter365 : struct.foreignKeyCols) + for (SQLForeignKey _iter373 : struct.foreignKeyCols) { - _iter365.write(oprot); + _iter373.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeyCols.size()); - for (SQLForeignKey _iter366 : struct.foreignKeyCols) + for (SQLForeignKey _iter374 : struct.foreignKeyCols) { - _iter366.write(oprot); + _iter374.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeyCols = new ArrayList(_list367.size); - SQLForeignKey _elem368; - for (int _i369 = 0; _i369 < _list367.size; ++_i369) + org.apache.thrift.protocol.TList _list375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeyCols = new ArrayList(_list375.size); + SQLForeignKey _elem376; + for (int _i377 = 0; _i377 < _list375.size; ++_i377) { - _elem368 = new SQLForeignKey(); - _elem368.read(iprot); - struct.foreignKeyCols.add(_elem368); + _elem376 = new SQLForeignKey(); + _elem376.read(iprot); + struct.foreignKeyCols.add(_elem376); } } struct.setForeignKeyColsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java index 0bd85f3140..f944e46fd6 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddNotNullConstrain case 1: // NOT_NULL_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list378 = iprot.readListBegin(); - struct.notNullConstraintCols = new ArrayList(_list378.size); - SQLNotNullConstraint _elem379; - for (int _i380 = 0; _i380 < _list378.size; ++_i380) + org.apache.thrift.protocol.TList _list386 = iprot.readListBegin(); + struct.notNullConstraintCols = new ArrayList(_list386.size); + SQLNotNullConstraint _elem387; + for (int _i388 = 0; _i388 < _list386.size; ++_i388) { - _elem379 = new SQLNotNullConstraint(); - _elem379.read(iprot); - struct.notNullConstraintCols.add(_elem379); + _elem387 = new SQLNotNullConstraint(); + _elem387.read(iprot); + struct.notNullConstraintCols.add(_elem387); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddNotNullConstrai oprot.writeFieldBegin(NOT_NULL_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraintCols.size())); - for (SQLNotNullConstraint _iter381 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter389 : struct.notNullConstraintCols) { - _iter381.write(oprot); + _iter389.write(oprot); } 
oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraintCols.size()); - for (SQLNotNullConstraint _iter382 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter390 : struct.notNullConstraintCols) { - _iter382.write(oprot); + _iter390.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain public void read(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraintCols = new ArrayList(_list383.size); - SQLNotNullConstraint _elem384; - for (int _i385 = 0; _i385 < _list383.size; ++_i385) + org.apache.thrift.protocol.TList _list391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraintCols = new ArrayList(_list391.size); + SQLNotNullConstraint _elem392; + for (int _i393 = 0; _i393 < _list391.size; ++_i393) { - _elem384 = new SQLNotNullConstraint(); - _elem384.read(iprot); - struct.notNullConstraintCols.add(_elem384); + _elem392 = new SQLNotNullConstraint(); + _elem392.read(iprot); + struct.notNullConstraintCols.add(_elem392); } } struct.setNotNullConstraintColsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 9119336a46..d351678bb6 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -704,14 +704,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques case 3: // PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list452 = iprot.readListBegin(); - struct.parts = new ArrayList(_list452.size); - Partition _elem453; - for (int _i454 = 0; _i454 < _list452.size; ++_i454) + org.apache.thrift.protocol.TList _list468 = iprot.readListBegin(); + struct.parts = new ArrayList(_list468.size); + Partition _elem469; + for (int _i470 = 0; _i470 < _list468.size; ++_i470) { - _elem453 = new Partition(); - _elem453.read(iprot); - struct.parts.add(_elem453); + _elem469 = new Partition(); + _elem469.read(iprot); + struct.parts.add(_elem469); } iprot.readListEnd(); } @@ -763,9 +763,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldBegin(PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size())); - for (Partition _iter455 : struct.parts) + for (Partition _iter471 : struct.parts) { - _iter455.write(oprot); + _iter471.write(oprot); } oprot.writeListEnd(); } @@ -800,9 +800,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques oprot.writeString(struct.tblName); { oprot.writeI32(struct.parts.size()); - for (Partition _iter456 : struct.parts) + for (Partition _iter472 : struct.parts) { - _iter456.write(oprot); 
+ _iter472.write(oprot); } } oprot.writeBool(struct.ifNotExists); @@ -824,14 +824,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list457 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.parts = new ArrayList(_list457.size); - Partition _elem458; - for (int _i459 = 0; _i459 < _list457.size; ++_i459) + org.apache.thrift.protocol.TList _list473 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parts = new ArrayList(_list473.size); + Partition _elem474; + for (int _i475 = 0; _i475 < _list473.size; ++_i475) { - _elem458 = new Partition(); - _elem458.read(iprot); - struct.parts.add(_elem458); + _elem474 = new Partition(); + _elem474.read(iprot); + struct.parts.add(_elem474); } } struct.setPartsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index 57d4953af6..23525e2703 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list444 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list444.size); - Partition _elem445; - for (int _i446 = 0; _i446 < _list444.size; ++_i446) + org.apache.thrift.protocol.TList _list460 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list460.size); + Partition _elem461; + for (int _i462 = 0; _i462 < _list460.size; ++_i462) { - _elem445 = new Partition(); - _elem445.read(iprot); - struct.partitions.add(_elem445); + _elem461 = new Partition(); + _elem461.read(iprot); + struct.partitions.add(_elem461); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter447 : struct.partitions) + for (Partition _iter463 : struct.partitions) { - _iter447.write(oprot); + _iter463.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter448 : struct.partitions) + for (Partition _iter464 : struct.partitions) { - _iter448.write(oprot); + _iter464.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list449 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list449.size); - Partition _elem450; - for (int _i451 = 0; _i451 < _list449.size; ++_i451) + org.apache.thrift.protocol.TList _list465 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list465.size); + Partition _elem466; + for (int _i467 = 0; _i467 < _list465.size; ++_i467) { - _elem450 = new Partition(); - _elem450.read(iprot); - struct.partitions.add(_elem450); + _elem466 = new Partition(); + _elem466.read(iprot); + struct.partitions.add(_elem466); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java index 900985bb39..9efcfe0a1b 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPrimaryKeyReques case 1: // PRIMARY_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list354 = iprot.readListBegin(); - struct.primaryKeyCols = new ArrayList(_list354.size); - SQLPrimaryKey _elem355; - for (int _i356 = 0; _i356 < _list354.size; ++_i356) + org.apache.thrift.protocol.TList _list362 = iprot.readListBegin(); + struct.primaryKeyCols = new ArrayList(_list362.size); + SQLPrimaryKey _elem363; + for (int _i364 = 0; _i364 < _list362.size; ++_i364) { - _elem355 = new SQLPrimaryKey(); - _elem355.read(iprot); - struct.primaryKeyCols.add(_elem355); + _elem363 = new SQLPrimaryKey(); + _elem363.read(iprot); + struct.primaryKeyCols.add(_elem363); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPrimaryKeyReque oprot.writeFieldBegin(PRIMARY_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeyCols.size())); - for (SQLPrimaryKey _iter357 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter365 : struct.primaryKeyCols) { - _iter357.write(oprot); + _iter365.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeyCols.size()); - for (SQLPrimaryKey _iter358 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter366 : struct.primaryKeyCols) { - _iter358.write(oprot); + _iter366.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeyCols = new ArrayList(_list359.size); - SQLPrimaryKey _elem360; - for (int _i361 = 0; _i361 < _list359.size; ++_i361) + org.apache.thrift.protocol.TList _list367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeyCols = new ArrayList(_list367.size); + SQLPrimaryKey _elem368; + for (int _i369 = 0; _i369 < _list367.size; ++_i369) { - _elem360 = new SQLPrimaryKey(); - _elem360.read(iprot); - struct.primaryKeyCols.add(_elem360); + 
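The AddPartitionsResult hunks above also show how the tuple scheme handles optional fields: the writer first emits a BitSet marking which optionals are present, and the reader checks incoming.get(0) before attempting the list. A hedged sketch of that convention (writeOptionalNames is an illustrative helper, not generated code):

  // One leading bit per declared optional field; an absent field costs a single bit.
  static void writeOptionalNames(org.apache.thrift.protocol.TTupleProtocol oprot,
                                 java.util.List<String> names) throws org.apache.thrift.TException {
    java.util.BitSet optionals = new java.util.BitSet();
    if (names != null) {
      optionals.set(0);
    }
    oprot.writeBitSet(optionals, 1);       // width = number of declared optionals
    if (names != null) {
      oprot.writeI32(names.size());        // bare size, no element-type byte
      for (String name : names) {
        oprot.writeString(name);
      }
    }
  }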
_elem368 = new SQLPrimaryKey(); + _elem368.read(iprot); + struct.primaryKeyCols.add(_elem368); } } struct.setPrimaryKeyColsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java index df4f54465c..0429fd5dc1 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddUniqueConstraint case 1: // UNIQUE_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list370 = iprot.readListBegin(); - struct.uniqueConstraintCols = new ArrayList(_list370.size); - SQLUniqueConstraint _elem371; - for (int _i372 = 0; _i372 < _list370.size; ++_i372) + org.apache.thrift.protocol.TList _list378 = iprot.readListBegin(); + struct.uniqueConstraintCols = new ArrayList(_list378.size); + SQLUniqueConstraint _elem379; + for (int _i380 = 0; _i380 < _list378.size; ++_i380) { - _elem371 = new SQLUniqueConstraint(); - _elem371.read(iprot); - struct.uniqueConstraintCols.add(_elem371); + _elem379 = new SQLUniqueConstraint(); + _elem379.read(iprot); + struct.uniqueConstraintCols.add(_elem379); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddUniqueConstrain oprot.writeFieldBegin(UNIQUE_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraintCols.size())); - for (SQLUniqueConstraint _iter373 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter381 : struct.uniqueConstraintCols) { - _iter373.write(oprot); + _iter381.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.uniqueConstraintCols.size()); - for (SQLUniqueConstraint _iter374 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter382 : struct.uniqueConstraintCols) { - _iter374.write(oprot); + _iter382.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint public void read(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list375 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraintCols = new ArrayList(_list375.size); - SQLUniqueConstraint _elem376; - for (int _i377 = 0; _i377 < _list375.size; ++_i377) + org.apache.thrift.protocol.TList _list383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraintCols = new ArrayList(_list383.size); + SQLUniqueConstraint _elem384; + for (int _i385 = 0; _i385 < _list383.size; ++_i385) { - _elem376 = new SQLUniqueConstraint(); - _elem376.read(iprot); - struct.uniqueConstraintCols.add(_elem376); + _elem384 = new SQLUniqueConstraint(); + _elem384.read(iprot); + struct.uniqueConstraintCols.add(_elem384); } } 
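For required list fields, as in AddPrimaryKeyRequest and AddUniqueConstraintRequest here, the tuple scheme skips the BitSet and the wire-level list header entirely: the writer emits only a size, and the reader rebuilds the TList metadata locally because the element type is fixed by the IDL. A minimal sketch of that read path (readKeysTuple is an illustrative name):

  // Tuple-scheme read: the size comes off the wire, the element type is supplied locally.
  static java.util.List<SQLPrimaryKey> readKeysTuple(org.apache.thrift.protocol.TTupleProtocol iprot)
      throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TList header = new org.apache.thrift.protocol.TList(
        org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
    java.util.List<SQLPrimaryKey> keys = new java.util.ArrayList<SQLPrimaryKey>(header.size);
    for (int i = 0; i < header.size; ++i) {
      SQLPrimaryKey key = new SQLPrimaryKey();
      key.read(iprot);                     // struct elements deserialize themselves
      keys.add(key);
    }
    return keys;
  }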
struct.setUniqueConstraintColsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index 1aec53bd4c..60a32adaa0 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -521,13 +521,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list580 = iprot.readListBegin(); - struct.txnIds = new ArrayList(_list580.size); - long _elem581; - for (int _i582 = 0; _i582 < _list580.size; ++_i582) + org.apache.thrift.protocol.TList _list596 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list596.size); + long _elem597; + for (int _i598 = 0; _i598 < _list596.size; ++_i598) { - _elem581 = iprot.readI64(); - struct.txnIds.add(_elem581); + _elem597 = iprot.readI64(); + struct.txnIds.add(_elem597); } iprot.readListEnd(); } @@ -569,9 +569,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); - for (long _iter583 : struct.txnIds) + for (long _iter599 : struct.txnIds) { - oprot.writeI64(_iter583); + oprot.writeI64(_iter599); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnIds.size()); - for (long _iter584 : struct.txnIds) + for (long _iter600 : struct.txnIds) { - oprot.writeI64(_iter584); + oprot.writeI64(_iter600); } } oprot.writeString(struct.dbName); @@ -619,13 +619,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txnIds = new ArrayList(_list585.size); - long _elem586; - for (int _i587 = 0; _i587 < _list585.size; ++_i587) + org.apache.thrift.protocol.TList _list601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list601.size); + long _elem602; + for (int _i603 = 0; _i603 < _list601.size; ++_i603) { - _elem586 = iprot.readI64(); - struct.txnIds.add(_elem586); + _elem602 = iprot.readI64(); + struct.txnIds.add(_elem602); } } struct.setTxnIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java index e29e1db6b6..9bf2f7f1db 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_TO_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list588 = iprot.readListBegin(); - struct.txnToWriteIds = new ArrayList(_list588.size); - TxnToWriteId _elem589; - for (int _i590 = 0; _i590 < _list588.size; ++_i590) + org.apache.thrift.protocol.TList _list604 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list604.size); + TxnToWriteId _elem605; + for (int _i606 = 0; _i606 < _list604.size; ++_i606) { - _elem589 = new TxnToWriteId(); - _elem589.read(iprot); - struct.txnToWriteIds.add(_elem589); + _elem605 = new TxnToWriteId(); + _elem605.read(iprot); + struct.txnToWriteIds.add(_elem605); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); - for (TxnToWriteId _iter591 : struct.txnToWriteIds) + for (TxnToWriteId _iter607 : struct.txnToWriteIds) { - _iter591.write(oprot); + _iter607.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnToWriteIds.size()); - for (TxnToWriteId _iter592 : struct.txnToWriteIds) + for (TxnToWriteId _iter608 : struct.txnToWriteIds) { - _iter592.write(oprot); + _iter608.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.txnToWriteIds = new ArrayList(_list593.size); - TxnToWriteId _elem594; - for (int _i595 = 0; _i595 < _list593.size; ++_i595) + org.apache.thrift.protocol.TList _list609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list609.size); + TxnToWriteId _elem610; + for (int _i611 = 0; _i611 < _list609.size; ++_i611) { - _elem594 = new TxnToWriteId(); - _elem594.read(iprot); - struct.txnToWriteIds.add(_elem594); + _elem610 = new TxnToWriteId(); + _elem610.read(iprot); + struct.txnToWriteIds.add(_elem610); } } struct.setTxnToWriteIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index ee9841f650..9feadc48f2 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) 
{ { - org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list746.size); - long _elem747; - for (int _i748 = 0; _i748 < _list746.size; ++_i748) + org.apache.thrift.protocol.TList _list762 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list762.size); + long _elem763; + for (int _i764 = 0; _i764 < _list762.size; ++_i764) { - _elem747 = iprot.readI64(); - struct.fileIds.add(_elem747); + _elem763 = iprot.readI64(); + struct.fileIds.add(_elem763); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter749 : struct.fileIds) + for (long _iter765 : struct.fileIds) { - oprot.writeI64(_iter749); + oprot.writeI64(_iter765); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter750 : struct.fileIds) + for (long _iter766 : struct.fileIds) { - oprot.writeI64(_iter750); + oprot.writeI64(_iter766); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list751 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list751.size); - long _elem752; - for (int _i753 = 0; _i753 < _list751.size; ++_i753) + org.apache.thrift.protocol.TList _list767 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list767.size); + long _elem768; + for (int _i769 = 0; _i769 < _list767.size; ++_i769) { - _elem752 = iprot.readI64(); - struct.fileIds.add(_elem752); + _elem768 = iprot.readI64(); + struct.fileIds.add(_elem768); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index 8dbe4c1d44..0a1f76f0e4 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list762 = iprot.readListBegin(); - struct.values = new ArrayList(_list762.size); - ClientCapability _elem763; - for (int _i764 = 0; _i764 < _list762.size; ++_i764) + org.apache.thrift.protocol.TList _list778 = iprot.readListBegin(); + struct.values = new ArrayList(_list778.size); + ClientCapability _elem779; + for (int _i780 = 0; _i780 < _list778.size; ++_i780) { - _elem763 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem763); + _elem779 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem779); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter765 : struct.values) + for (ClientCapability _iter781 : struct.values) { - oprot.writeI32(_iter765.getValue()); + oprot.writeI32(_iter781.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter766 : struct.values) + for (ClientCapability _iter782 : struct.values) { - oprot.writeI32(_iter766.getValue()); + oprot.writeI32(_iter782.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list767 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list767.size); - ClientCapability _elem768; - for (int _i769 = 0; _i769 < _list767.size; ++_i769) + org.apache.thrift.protocol.TList _list783 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list783.size); + ClientCapability _elem784; + for (int _i785 = 0; _i785 < _list783.size; ++_i785) { - _elem768 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem768); + _elem784 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem784); } } struct.setValuesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 1853720368..af84a9c91c 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map628 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map628.size); - String _key629; - String _val630; - for (int _i631 = 0; _i631 < _map628.size; ++_i631) + org.apache.thrift.protocol.TMap _map644 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map644.size); + String _key645; + String _val646; + for (int _i647 = 0; _i647 < _map644.size; ++_i647) { - _key629 = iprot.readString(); - _val630 = iprot.readString(); - struct.properties.put(_key629, _val630); + _key645 = iprot.readString(); + _val646 = iprot.readString(); + struct.properties.put(_key645, _val646); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest 
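ClientCapabilities above illustrates the enum convention: enums cross the wire as plain i32 values via getValue(), and the reader maps them back with findByValue(), which returns null for any value this side's enum does not define. That null is the forward-compatibility escape hatch when a newer peer sends a capability an older client lacks. A short decoding sketch (decodeCapabilities is an illustrative name):

  // Unknown enum values decode to null rather than throwing.
  static java.util.List<ClientCapability> decodeCapabilities(
      org.apache.thrift.protocol.TProtocol iprot, int count) throws org.apache.thrift.TException {
    java.util.List<ClientCapability> values = new java.util.ArrayList<ClientCapability>(count);
    for (int i = 0; i < count; ++i) {
      values.add(ClientCapability.findByValue(iprot.readI32()));  // may be null
    }
    return values;
  }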
oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter632 : struct.properties.entrySet()) + for (Map.Entry _iter648 : struct.properties.entrySet()) { - oprot.writeString(_iter632.getKey()); - oprot.writeString(_iter632.getValue()); + oprot.writeString(_iter648.getKey()); + oprot.writeString(_iter648.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter633 : struct.properties.entrySet()) + for (Map.Entry _iter649 : struct.properties.entrySet()) { - oprot.writeString(_iter633.getKey()); - oprot.writeString(_iter633.getValue()); + oprot.writeString(_iter649.getKey()); + oprot.writeString(_iter649.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map634 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map634.size); - String _key635; - String _val636; - for (int _i637 = 0; _i637 < _map634.size; ++_i637) + org.apache.thrift.protocol.TMap _map650 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map650.size); + String _key651; + String _val652; + for (int _i653 = 0; _i653 < _map650.size; ++_i653) { - _key635 = iprot.readString(); - _val636 = iprot.readString(); - struct.properties.put(_key635, _val636); + _key651 = iprot.readString(); + _val652 = iprot.readString(); + struct.properties.put(_key651, _val652); } } struct.setPropertiesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index 717840fa0b..3da8ed5e9e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st case 3: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set654 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set654.size); - String _elem655; - for (int _i656 = 0; _i656 < _set654.size; ++_i656) + org.apache.thrift.protocol.TSet _set670 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set670.size); + String _elem671; + for (int _i672 = 0; _i672 < _set670.size; ++_i672) { - _elem655 = iprot.readString(); - struct.tablesUsed.add(_elem655); + _elem671 = iprot.readString(); + struct.tablesUsed.add(_elem671); } iprot.readSetEnd(); } @@ -669,9 +669,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter657 : 
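CompactionRequest.properties shows the map counterpart of the same pattern, and CreationMetadata.tablesUsed the set one; both size the backing collection at 2*n so that HashMap's default 0.75 load factor never forces a rehash during deserialization. The map-read shape, sketched (readProperties is an illustrative name):

  // Header, then N key/value pairs, then the end marker.
  private static java.util.Map<String, String> readProperties(org.apache.thrift.protocol.TProtocol iprot)
      throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TMap header = iprot.readMapBegin();
    java.util.Map<String, String> props = new java.util.HashMap<String, String>(2 * header.size);
    for (int i = 0; i < header.size; ++i) {
      props.put(iprot.readString(), iprot.readString());  // key first, then value
    }
    iprot.readMapEnd();
    return props;
  }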
struct.tablesUsed) + for (String _iter673 : struct.tablesUsed) { - oprot.writeString(_iter657); + oprot.writeString(_iter673); } oprot.writeSetEnd(); } @@ -705,9 +705,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st oprot.writeString(struct.tblName); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter658 : struct.tablesUsed) + for (String _iter674 : struct.tablesUsed) { - oprot.writeString(_iter658); + oprot.writeString(_iter674); } } BitSet optionals = new BitSet(); @@ -728,13 +728,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata str struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TSet _set659 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set659.size); - String _elem660; - for (int _i661 = 0; _i661 < _set659.size; ++_i661) + org.apache.thrift.protocol.TSet _set675 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set675.size); + String _elem676; + for (int _i677 = 0; _i677 < _set675.size; ++_i677) { - _elem660 = iprot.readString(); - struct.tablesUsed.add(_elem660); + _elem676 = iprot.readString(); + struct.tablesUsed.add(_elem676); } } struct.setTablesUsedIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java new file mode 100644 index 0000000000..5f4954d2a7 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java @@ -0,0 +1,490 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DefaultConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DefaultConstraintsRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new DefaultConstraintsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new DefaultConstraintsRequestTupleSchemeFactory()); + } + + private String db_name; // required + private String tbl_name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DefaultConstraintsRequest.class, metaDataMap); + } + + public DefaultConstraintsRequest() { + } + + public DefaultConstraintsRequest( + String db_name, + String tbl_name) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + } + + /** + * Performs a deep copy on other. 
+ */ + public DefaultConstraintsRequest(DefaultConstraintsRequest other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + } + + public DefaultConstraintsRequest deepCopy() { + return new DefaultConstraintsRequest(this); + } + + @Override + public void clear() { + this.db_name = null; + this.tbl_name = null; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof DefaultConstraintsRequest) + return this.equals((DefaultConstraintsRequest)that); + return false; + } + + public boolean equals(DefaultConstraintsRequest that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + return list.hashCode(); + } + + 
@Override + public int compareTo(DefaultConstraintsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("DefaultConstraintsRequest("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDb_name()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString()); + } + + if (!isSetTbl_name()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tbl_name' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class DefaultConstraintsRequestStandardSchemeFactory implements SchemeFactory { + public DefaultConstraintsRequestStandardScheme getScheme() { + return new DefaultConstraintsRequestStandardScheme(); + } + } + + private static class DefaultConstraintsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class DefaultConstraintsRequestTupleSchemeFactory implements SchemeFactory { + public DefaultConstraintsRequestTupleScheme getScheme() { + return new DefaultConstraintsRequestTupleScheme(); + } + } + + private static class DefaultConstraintsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.db_name); + oprot.writeString(struct.tbl_name); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + 
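The new DefaultConstraintsRequest follows the pattern of the other constraint request types: both fields are REQUIRED, and validate() runs before every write, so an incomplete request fails on the client side instead of producing a malformed payload. A minimal usage sketch (the database and table names below are placeholders, not values from this patch):

  static DefaultConstraintsRequest buildRequest() throws org.apache.thrift.TException {
    DefaultConstraintsRequest req = new DefaultConstraintsRequest("default", "web_logs");
    req.validate();  // throws TProtocolException if db_name or tbl_name is unset
    return req;
  }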
} + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java new file mode 100644 index 0000000000..22514de513 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java @@ -0,0 +1,443 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DefaultConstraintsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DefaultConstraintsResponse"); + + private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraints", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new DefaultConstraintsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new DefaultConstraintsResponseTupleSchemeFactory()); + } + + private List defaultConstraints; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DEFAULT_CONSTRAINTS((short)1, "defaultConstraints"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DEFAULT_CONSTRAINTS + return DEFAULT_CONSTRAINTS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DEFAULT_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraints", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DefaultConstraintsResponse.class, metaDataMap); + } + + public DefaultConstraintsResponse() { + } + + public DefaultConstraintsResponse( + List defaultConstraints) + { + this(); + this.defaultConstraints = defaultConstraints; + } + + /** + * Performs a deep copy on other. + */ + public DefaultConstraintsResponse(DefaultConstraintsResponse other) { + if (other.isSetDefaultConstraints()) { + List __this__defaultConstraints = new ArrayList(other.defaultConstraints.size()); + for (SQLDefaultConstraint other_element : other.defaultConstraints) { + __this__defaultConstraints.add(new SQLDefaultConstraint(other_element)); + } + this.defaultConstraints = __this__defaultConstraints; + } + } + + public DefaultConstraintsResponse deepCopy() { + return new DefaultConstraintsResponse(this); + } + + @Override + public void clear() { + this.defaultConstraints = null; + } + + public int getDefaultConstraintsSize() { + return (this.defaultConstraints == null) ? 0 : this.defaultConstraints.size(); + } + + public java.util.Iterator getDefaultConstraintsIterator() { + return (this.defaultConstraints == null) ? 
null : this.defaultConstraints.iterator(); + } + + public void addToDefaultConstraints(SQLDefaultConstraint elem) { + if (this.defaultConstraints == null) { + this.defaultConstraints = new ArrayList(); + } + this.defaultConstraints.add(elem); + } + + public List getDefaultConstraints() { + return this.defaultConstraints; + } + + public void setDefaultConstraints(List defaultConstraints) { + this.defaultConstraints = defaultConstraints; + } + + public void unsetDefaultConstraints() { + this.defaultConstraints = null; + } + + /** Returns true if field defaultConstraints is set (has been assigned a value) and false otherwise */ + public boolean isSetDefaultConstraints() { + return this.defaultConstraints != null; + } + + public void setDefaultConstraintsIsSet(boolean value) { + if (!value) { + this.defaultConstraints = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DEFAULT_CONSTRAINTS: + if (value == null) { + unsetDefaultConstraints(); + } else { + setDefaultConstraints((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DEFAULT_CONSTRAINTS: + return getDefaultConstraints(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DEFAULT_CONSTRAINTS: + return isSetDefaultConstraints(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof DefaultConstraintsResponse) + return this.equals((DefaultConstraintsResponse)that); + return false; + } + + public boolean equals(DefaultConstraintsResponse that) { + if (that == null) + return false; + + boolean this_present_defaultConstraints = true && this.isSetDefaultConstraints(); + boolean that_present_defaultConstraints = true && that.isSetDefaultConstraints(); + if (this_present_defaultConstraints || that_present_defaultConstraints) { + if (!(this_present_defaultConstraints && that_present_defaultConstraints)) + return false; + if (!this.defaultConstraints.equals(that.defaultConstraints)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_defaultConstraints = true && (isSetDefaultConstraints()); + list.add(present_defaultConstraints); + if (present_defaultConstraints) + list.add(defaultConstraints); + + return list.hashCode(); + } + + @Override + public int compareTo(DefaultConstraintsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDefaultConstraints()).compareTo(other.isSetDefaultConstraints()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDefaultConstraints()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultConstraints, other.defaultConstraints); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("DefaultConstraintsResponse("); + boolean first = true; + + sb.append("defaultConstraints:"); + if (this.defaultConstraints == null) { + sb.append("null"); + } else { + sb.append(this.defaultConstraints); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDefaultConstraints()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'defaultConstraints' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class DefaultConstraintsResponseStandardSchemeFactory implements SchemeFactory { + public DefaultConstraintsResponseStandardScheme getScheme() { + return new DefaultConstraintsResponseStandardScheme(); + } + } + + private static class DefaultConstraintsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DEFAULT_CONSTRAINTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list354 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list354.size); + SQLDefaultConstraint _elem355; + for (int _i356 = 0; _i356 < _list354.size; ++_i356) + { + _elem355 = new SQLDefaultConstraint(); + _elem355.read(iprot); + struct.defaultConstraints.add(_elem355); + } + iprot.readListEnd(); + } + struct.setDefaultConstraintsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.defaultConstraints != null) { + oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); + for (SQLDefaultConstraint _iter357 : struct.defaultConstraints) + { + _iter357.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + 
oprot.writeStructEnd(); + } + + } + + private static class DefaultConstraintsResponseTupleSchemeFactory implements SchemeFactory { + public DefaultConstraintsResponseTupleScheme getScheme() { + return new DefaultConstraintsResponseTupleScheme(); + } + } + + private static class DefaultConstraintsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.defaultConstraints.size()); + for (SQLDefaultConstraint _iter358 : struct.defaultConstraints) + { + _iter358.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list359.size); + SQLDefaultConstraint _elem360; + for (int _i361 = 0; _i361 < _list359.size; ++_i361) + { + _elem360 = new SQLDefaultConstraint(); + _elem360.read(iprot); + struct.defaultConstraints.add(_elem360); + } + } + struct.setDefaultConstraintsIsSet(true); + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index b9dc04a317..daf87c559f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResul case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list460 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list460.size); - Partition _elem461; - for (int _i462 = 0; _i462 < _list460.size; ++_i462) + org.apache.thrift.protocol.TList _list476 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list476.size); + Partition _elem477; + for (int _i478 = 0; _i478 < _list476.size; ++_i478) { - _elem461 = new Partition(); - _elem461.read(iprot); - struct.partitions.add(_elem461); + _elem477 = new Partition(); + _elem477.read(iprot); + struct.partitions.add(_elem477); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResu oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter463 : struct.partitions) + for (Partition _iter479 : struct.partitions) { - _iter463.write(oprot); + _iter479.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResul if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter464 : struct.partitions) + for (Partition _iter480 : struct.partitions) { - _iter464.write(oprot); + _iter480.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult 
BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list465.size); - Partition _elem466; - for (int _i467 = 0; _i467 < _list465.size; ++_i467) + org.apache.thrift.protocol.TList _list481 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list481.size); + Partition _elem482; + for (int _i483 = 0; _i483 < _list481.size; ++_i483) { - _elem466 = new Partition(); - _elem466.read(iprot); - struct.partitions.add(_elem466); + _elem482 = new Partition(); + _elem482.read(iprot); + struct.partitions.add(_elem482); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 8936410e23..ca357ed33e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -713,13 +713,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list686 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list686.size); - String _elem687; - for (int _i688 = 0; _i688 < _list686.size; ++_i688) + org.apache.thrift.protocol.TList _list702 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list702.size); + String _elem703; + for (int _i704 = 0; _i704 < _list702.size; ++_i704) { - _elem687 = iprot.readString(); - struct.partitionVals.add(_elem687); + _elem703 = iprot.readString(); + struct.partitionVals.add(_elem703); } iprot.readListEnd(); } @@ -768,9 +768,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter689 : struct.partitionVals) + for (String _iter705 : struct.partitionVals) { - oprot.writeString(_iter689); + oprot.writeString(_iter705); } oprot.writeListEnd(); } @@ -816,9 +816,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter690 : struct.partitionVals) + for (String _iter706 : struct.partitionVals) { - oprot.writeString(_iter690); + oprot.writeString(_iter706); } } } @@ -843,13 +843,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list691.size); - String _elem692; - for (int _i693 = 0; _i693 < _list691.size; ++_i693) + org.apache.thrift.protocol.TList _list707 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list707.size); + String _elem708; + for (int _i709 = 0; _i709 < _list707.size; 
++_i709) { - _elem692 = iprot.readString(); - struct.partitionVals.add(_elem692); + _elem708 = iprot.readString(); + struct.partitionVals.add(_elem708); } } struct.setPartitionValsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index ca62b882c0..32e94cc821 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -997,14 +997,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th case 8: // RESOURCE_URIS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); - struct.resourceUris = new ArrayList(_list516.size); - ResourceUri _elem517; - for (int _i518 = 0; _i518 < _list516.size; ++_i518) + org.apache.thrift.protocol.TList _list532 = iprot.readListBegin(); + struct.resourceUris = new ArrayList(_list532.size); + ResourceUri _elem533; + for (int _i534 = 0; _i534 < _list532.size; ++_i534) { - _elem517 = new ResourceUri(); - _elem517.read(iprot); - struct.resourceUris.add(_elem517); + _elem533 = new ResourceUri(); + _elem533.read(iprot); + struct.resourceUris.add(_elem533); } iprot.readListEnd(); } @@ -1063,9 +1063,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size())); - for (ResourceUri _iter519 : struct.resourceUris) + for (ResourceUri _iter535 : struct.resourceUris) { - _iter519.write(oprot); + _iter535.write(oprot); } oprot.writeListEnd(); } @@ -1138,9 +1138,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { { oprot.writeI32(struct.resourceUris.size()); - for (ResourceUri _iter520 : struct.resourceUris) + for (ResourceUri _iter536 : struct.resourceUris) { - _iter520.write(oprot); + _iter536.write(oprot); } } } @@ -1180,14 +1180,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourceUris = new ArrayList(_list521.size); - ResourceUri _elem522; - for (int _i523 = 0; _i523 < _list521.size; ++_i523) + org.apache.thrift.protocol.TList _list537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourceUris = new ArrayList(_list537.size); + ResourceUri _elem538; + for (int _i539 = 0; _i539 < _list537.size; ++_i539) { - _elem522 = new ResourceUri(); - _elem522.read(iprot); - struct.resourceUris.add(_elem522); + _elem538 = new ResourceUri(); + _elem538.read(iprot); + struct.resourceUris.add(_elem538); } } struct.setResourceUrisIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index ba29e90299..dd5ea74b1c 100644 --- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list754 = iprot.readListBegin(); - struct.functions = new ArrayList(_list754.size); - Function _elem755; - for (int _i756 = 0; _i756 < _list754.size; ++_i756) + org.apache.thrift.protocol.TList _list770 = iprot.readListBegin(); + struct.functions = new ArrayList(_list770.size); + Function _elem771; + for (int _i772 = 0; _i772 < _list770.size; ++_i772) { - _elem755 = new Function(); - _elem755.read(iprot); - struct.functions.add(_elem755); + _elem771 = new Function(); + _elem771.read(iprot); + struct.functions.add(_elem771); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter757 : struct.functions) + for (Function _iter773 : struct.functions) { - _iter757.write(oprot); + _iter773.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter758 : struct.functions) + for (Function _iter774 : struct.functions) { - _iter758.write(oprot); + _iter774.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list759 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list759.size); - Function _elem760; - for (int _i761 = 0; _i761 < _list759.size; ++_i761) + org.apache.thrift.protocol.TList _list775 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list775.size); + Function _elem776; + for (int _i777 = 0; _i777 < _list775.size; ++_i777) { - _elem760 = new Function(); - _elem760.read(iprot); - struct.functions.add(_elem760); + _elem776 = new Function(); + _elem776.read(iprot); + struct.functions.add(_elem776); } } struct.setFunctionsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 62b0768d10..a3a577955d 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list704 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list704.size); - 
long _elem705; - for (int _i706 = 0; _i706 < _list704.size; ++_i706) + org.apache.thrift.protocol.TList _list720 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list720.size); + long _elem721; + for (int _i722 = 0; _i722 < _list720.size; ++_i722) { - _elem705 = iprot.readI64(); - struct.fileIds.add(_elem705); + _elem721 = iprot.readI64(); + struct.fileIds.add(_elem721); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter707 : struct.fileIds) + for (long _iter723 : struct.fileIds) { - oprot.writeI64(_iter707); + oprot.writeI64(_iter723); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter708 : struct.fileIds) + for (long _iter724 : struct.fileIds) { - oprot.writeI64(_iter708); + oprot.writeI64(_iter724); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list709 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list709.size); - long _elem710; - for (int _i711 = 0; _i711 < _list709.size; ++_i711) + org.apache.thrift.protocol.TList _list725 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list725.size); + long _elem726; + for (int _i727 = 0; _i727 < _list725.size; ++_i727) { - _elem710 = iprot.readI64(); - struct.fileIds.add(_elem710); + _elem726 = iprot.readI64(); + struct.fileIds.add(_elem726); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 881803fea0..7604f19699 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map694 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map694.size); - long _key695; - MetadataPpdResult _val696; - for (int _i697 = 0; _i697 < _map694.size; ++_i697) + org.apache.thrift.protocol.TMap _map710 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map710.size); + long _key711; + MetadataPpdResult _val712; + for (int _i713 = 0; _i713 < _map710.size; ++_i713) { - _key695 = iprot.readI64(); - _val696 = new MetadataPpdResult(); - _val696.read(iprot); - struct.metadata.put(_key695, _val696); + _key711 = iprot.readI64(); + _val712 = new MetadataPpdResult(); + _val712.read(iprot); + 
struct.metadata.put(_key711, _val712); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter698 : struct.metadata.entrySet()) + for (Map.Entry _iter714 : struct.metadata.entrySet()) { - oprot.writeI64(_iter698.getKey()); - _iter698.getValue().write(oprot); + oprot.writeI64(_iter714.getKey()); + _iter714.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter699 : struct.metadata.entrySet()) + for (Map.Entry _iter715 : struct.metadata.entrySet()) { - oprot.writeI64(_iter699.getKey()); - _iter699.getValue().write(oprot); + oprot.writeI64(_iter715.getKey()); + _iter715.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map700 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map700.size); - long _key701; - MetadataPpdResult _val702; - for (int _i703 = 0; _i703 < _map700.size; ++_i703) + org.apache.thrift.protocol.TMap _map716 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map716.size); + long _key717; + MetadataPpdResult _val718; + for (int _i719 = 0; _i719 < _map716.size; ++_i719) { - _key701 = iprot.readI64(); - _val702 = new MetadataPpdResult(); - _val702.read(iprot); - struct.metadata.put(_key701, _val702); + _key717 = iprot.readI64(); + _val718 = new MetadataPpdResult(); + _val718.read(iprot); + struct.metadata.put(_key717, _val718); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index a051fb08b3..ce73688d57 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list722.size); - long _elem723; - for (int _i724 = 0; _i724 < _list722.size; ++_i724) + org.apache.thrift.protocol.TList _list738 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list738.size); + long _elem739; + for (int _i740 = 0; _i740 < _list738.size; ++_i740) { - _elem723 = iprot.readI64(); - struct.fileIds.add(_elem723); + _elem739 = 
iprot.readI64(); + struct.fileIds.add(_elem739); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter725 : struct.fileIds) + for (long _iter741 : struct.fileIds) { - oprot.writeI64(_iter725); + oprot.writeI64(_iter741); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter726 : struct.fileIds) + for (long _iter742 : struct.fileIds) { - oprot.writeI64(_iter726); + oprot.writeI64(_iter742); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list727.size); - long _elem728; - for (int _i729 = 0; _i729 < _list727.size; ++_i729) + org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list743.size); + long _elem744; + for (int _i745 = 0; _i745 < _list743.size; ++_i745) { - _elem728 = iprot.readI64(); - struct.fileIds.add(_elem728); + _elem744 = iprot.readI64(); + struct.fileIds.add(_elem744); } } struct.setFileIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 74ca66ae4f..f797ce8943 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map712 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map712.size); - long _key713; - ByteBuffer _val714; - for (int _i715 = 0; _i715 < _map712.size; ++_i715) + org.apache.thrift.protocol.TMap _map728 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map728.size); + long _key729; + ByteBuffer _val730; + for (int _i731 = 0; _i731 < _map728.size; ++_i731) { - _key713 = iprot.readI64(); - _val714 = iprot.readBinary(); - struct.metadata.put(_key713, _val714); + _key729 = iprot.readI64(); + _val730 = iprot.readBinary(); + struct.metadata.put(_key729, _val730); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter716 : struct.metadata.entrySet()) + for (Map.Entry _iter732 : 
struct.metadata.entrySet()) { - oprot.writeI64(_iter716.getKey()); - oprot.writeBinary(_iter716.getValue()); + oprot.writeI64(_iter732.getKey()); + oprot.writeBinary(_iter732.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter717 : struct.metadata.entrySet()) + for (Map.Entry _iter733 : struct.metadata.entrySet()) { - oprot.writeI64(_iter717.getKey()); - oprot.writeBinary(_iter717.getValue()); + oprot.writeI64(_iter733.getKey()); + oprot.writeBinary(_iter733.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map718 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map718.size); - long _key719; - ByteBuffer _val720; - for (int _i721 = 0; _i721 < _map718.size; ++_i721) + org.apache.thrift.protocol.TMap _map734 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map734.size); + long _key735; + ByteBuffer _val736; + for (int _i737 = 0; _i737 < _map734.size; ++_i737) { - _key719 = iprot.readI64(); - _val720 = iprot.readBinary(); - struct.metadata.put(_key719, _val720); + _key735 = iprot.readI64(); + _val736 = iprot.readBinary(); + struct.metadata.put(_key735, _val736); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java index ae644df9b3..ede4b7e0c1 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java @@ -447,14 +447,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResp case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list524 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list524.size); - TxnInfo _elem525; - for (int _i526 = 0; _i526 < _list524.size; ++_i526) + org.apache.thrift.protocol.TList _list540 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list540.size); + TxnInfo _elem541; + for (int _i542 = 0; _i542 < _list540.size; ++_i542) { - _elem525 = new TxnInfo(); - _elem525.read(iprot); - struct.open_txns.add(_elem525); + _elem541 = new TxnInfo(); + _elem541.read(iprot); + struct.open_txns.add(_elem541); } iprot.readListEnd(); } @@ -483,9 +483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoRes oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size())); - for (TxnInfo _iter527 : struct.open_txns) + for (TxnInfo _iter543 : struct.open_txns) { - 
_iter527.write(oprot); + _iter543.write(oprot); } oprot.writeListEnd(); } @@ -511,9 +511,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResp oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (TxnInfo _iter528 : struct.open_txns) + for (TxnInfo _iter544 : struct.open_txns) { - _iter528.write(oprot); + _iter544.write(oprot); } } } @@ -524,14 +524,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoRespo struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.open_txns = new ArrayList(_list529.size); - TxnInfo _elem530; - for (int _i531 = 0; _i531 < _list529.size; ++_i531) + org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.open_txns = new ArrayList(_list545.size); + TxnInfo _elem546; + for (int _i547 = 0; _i547 < _list545.size; ++_i547) { - _elem530 = new TxnInfo(); - _elem530.read(iprot); - struct.open_txns.add(_elem530); + _elem546 = new TxnInfo(); + _elem546.read(iprot); + struct.open_txns.add(_elem546); } } struct.setOpen_txnsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java index 662c093e4a..9c6bcb740f 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java @@ -615,13 +615,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list532 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list532.size); - long _elem533; - for (int _i534 = 0; _i534 < _list532.size; ++_i534) + org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list548.size); + long _elem549; + for (int _i550 = 0; _i550 < _list548.size; ++_i550) { - _elem533 = iprot.readI64(); - struct.open_txns.add(_elem533); + _elem549 = iprot.readI64(); + struct.open_txns.add(_elem549); } iprot.readListEnd(); } @@ -666,9 +666,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRespons oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size())); - for (long _iter535 : struct.open_txns) + for (long _iter551 : struct.open_txns) { - oprot.writeI64(_iter535); + oprot.writeI64(_iter551); } oprot.writeListEnd(); } @@ -704,9 +704,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (long _iter536 : struct.open_txns) + for (long _iter552 : struct.open_txns) { - oprot.writeI64(_iter536); + oprot.writeI64(_iter552); } } oprot.writeBinary(struct.abortedBits); @@ -726,13 +726,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct.txn_high_water_mark = iprot.readI64(); 
struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.open_txns = new ArrayList(_list537.size); - long _elem538; - for (int _i539 = 0; _i539 < _list537.size; ++_i539) + org.apache.thrift.protocol.TList _list553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.open_txns = new ArrayList(_list553.size); + long _elem554; + for (int _i555 = 0; _i555 < _list553.size; ++_i555) { - _elem538 = iprot.readI64(); - struct.open_txns.add(_elem538); + _elem554 = iprot.readI64(); + struct.open_txns.add(_elem554); } } struct.setOpen_txnsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index 84af22f413..f495dd841e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -525,13 +525,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list770 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list770.size); - String _elem771; - for (int _i772 = 0; _i772 < _list770.size; ++_i772) + org.apache.thrift.protocol.TList _list786 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list786.size); + String _elem787; + for (int _i788 = 0; _i788 < _list786.size; ++_i788) { - _elem771 = iprot.readString(); - struct.tblNames.add(_elem771); + _elem787 = iprot.readString(); + struct.tblNames.add(_elem787); } iprot.readListEnd(); } @@ -572,9 +572,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter773 : struct.tblNames) + for (String _iter789 : struct.tblNames) { - oprot.writeString(_iter773); + oprot.writeString(_iter789); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter774 : struct.tblNames) + for (String _iter790 : struct.tblNames) { - oprot.writeString(_iter774); + oprot.writeString(_iter790); } } } @@ -636,13 +636,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list775 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list775.size); - String _elem776; - for (int _i777 = 0; _i777 < _list775.size; ++_i777) + org.apache.thrift.protocol.TList _list791 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list791.size); + String _elem792; + for (int _i793 = 0; _i793 < _list791.size; ++_i793) { - _elem776 = iprot.readString(); - struct.tblNames.add(_elem776); + _elem792 = iprot.readString(); + 
struct.tblNames.add(_elem792); } } struct.setTblNamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index 4aba1d2153..08755d7a23 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list778 = iprot.readListBegin(); - struct.tables = new ArrayList
<Table>(_list778.size); - Table _elem779; - for (int _i780 = 0; _i780 < _list778.size; ++_i780) + org.apache.thrift.protocol.TList _list794 = iprot.readListBegin(); + struct.tables = new ArrayList
<Table>(_list794.size); + Table _elem795; + for (int _i796 = 0; _i796 < _list794.size; ++_i796) { - _elem779 = new Table(); - _elem779.read(iprot); - struct.tables.add(_elem779); + _elem795 = new Table(); + _elem795.read(iprot); + struct.tables.add(_elem795); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter781 : struct.tables) + for (Table _iter797 : struct.tables) { - _iter781.write(oprot); + _iter797.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter782 : struct.tables) + for (Table _iter798 : struct.tables) { - _iter782.write(oprot); + _iter798.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list783 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList
<Table>(_list783.size); - Table _elem784; - for (int _i785 = 0; _i785 < _list783.size; ++_i785) + org.apache.thrift.protocol.TList _list799 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList<Table>
(_list799.size); + Table _elem800; + for (int _i801 = 0; _i801 < _list799.size; ++_i801) { - _elem784 = new Table(); - _elem784.read(iprot); - struct.tables.add(_elem784); + _elem800 = new Table(); + _elem800.read(iprot); + struct.tables.add(_elem800); } } struct.setTablesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java index ec738b0394..1eead3e250 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java @@ -436,13 +436,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsReq case 1: // FULL_TABLE_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list556 = iprot.readListBegin(); - struct.fullTableNames = new ArrayList(_list556.size); - String _elem557; - for (int _i558 = 0; _i558 < _list556.size; ++_i558) + org.apache.thrift.protocol.TList _list572 = iprot.readListBegin(); + struct.fullTableNames = new ArrayList(_list572.size); + String _elem573; + for (int _i574 = 0; _i574 < _list572.size; ++_i574) { - _elem557 = iprot.readString(); - struct.fullTableNames.add(_elem557); + _elem573 = iprot.readString(); + struct.fullTableNames.add(_elem573); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(FULL_TABLE_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fullTableNames.size())); - for (String _iter559 : struct.fullTableNames) + for (String _iter575 : struct.fullTableNames) { - oprot.writeString(_iter559); + oprot.writeString(_iter575); } oprot.writeListEnd(); } @@ -508,9 +508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fullTableNames.size()); - for (String _iter560 : struct.fullTableNames) + for (String _iter576 : struct.fullTableNames) { - oprot.writeString(_iter560); + oprot.writeString(_iter576); } } oprot.writeString(struct.validTxnList); @@ -520,13 +520,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fullTableNames = new ArrayList(_list561.size); - String _elem562; - for (int _i563 = 0; _i563 < _list561.size; ++_i563) + org.apache.thrift.protocol.TList _list577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fullTableNames = new ArrayList(_list577.size); + String _elem578; + for (int _i579 = 0; _i579 < _list577.size; ++_i579) { - _elem562 = iprot.readString(); - struct.fullTableNames.add(_elem562); + _elem578 = iprot.readString(); + struct.fullTableNames.add(_elem578); } } struct.setFullTableNamesIsSet(true); diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java index 50eba33a27..f42161304b 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRes case 1: // TBL_VALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list572 = iprot.readListBegin(); - struct.tblValidWriteIds = new ArrayList(_list572.size); - TableValidWriteIds _elem573; - for (int _i574 = 0; _i574 < _list572.size; ++_i574) + org.apache.thrift.protocol.TList _list588 = iprot.readListBegin(); + struct.tblValidWriteIds = new ArrayList(_list588.size); + TableValidWriteIds _elem589; + for (int _i590 = 0; _i590 < _list588.size; ++_i590) { - _elem573 = new TableValidWriteIds(); - _elem573.read(iprot); - struct.tblValidWriteIds.add(_elem573); + _elem589 = new TableValidWriteIds(); + _elem589.read(iprot); + struct.tblValidWriteIds.add(_elem589); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size())); - for (TableValidWriteIds _iter575 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter591 : struct.tblValidWriteIds) { - _iter575.write(oprot); + _iter591.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tblValidWriteIds.size()); - for (TableValidWriteIds _iter576 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter592 : struct.tblValidWriteIds) { - _iter576.write(oprot); + _iter592.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tblValidWriteIds = new ArrayList(_list577.size); - TableValidWriteIds _elem578; - for (int _i579 = 0; _i579 < _list577.size; ++_i579) + org.apache.thrift.protocol.TList _list593 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tblValidWriteIds = new ArrayList(_list593.size); + TableValidWriteIds _elem594; + for (int _i595 = 0; _i595 < _list593.size; ++_i595) { - _elem578 = new TableValidWriteIds(); - _elem578.read(iprot); - struct.tblValidWriteIds.add(_elem578); + _elem594 = new TableValidWriteIds(); + _elem594.read(iprot); + struct.tblValidWriteIds.add(_elem594); } } struct.setTblValidWriteIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index 0bcd837235..796bbfdcea 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set612 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set612.size); - long _elem613; - for (int _i614 = 0; _i614 < _set612.size; ++_i614) + org.apache.thrift.protocol.TSet _set628 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set628.size); + long _elem629; + for (int _i630 = 0; _i630 < _set628.size; ++_i630) { - _elem613 = iprot.readI64(); - struct.aborted.add(_elem613); + _elem629 = iprot.readI64(); + struct.aborted.add(_elem629); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set615 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set615.size); - long _elem616; - for (int _i617 = 0; _i617 < _set615.size; ++_i617) + org.apache.thrift.protocol.TSet _set631 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set631.size); + long _elem632; + for (int _i633 = 0; _i633 < _set631.size; ++_i633) { - _elem616 = iprot.readI64(); - struct.nosuch.add(_elem616); + _elem632 = iprot.readI64(); + struct.nosuch.add(_elem632); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter618 : struct.aborted) + for (long _iter634 : struct.aborted) { - oprot.writeI64(_iter618); + oprot.writeI64(_iter634); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter619 : struct.nosuch) + for (long _iter635 : struct.nosuch) { - oprot.writeI64(_iter619); + oprot.writeI64(_iter635); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter620 : struct.aborted) + for (long _iter636 : struct.aborted) { - oprot.writeI64(_iter620); + oprot.writeI64(_iter636); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter621 : struct.nosuch) + for (long _iter637 : struct.nosuch) { - oprot.writeI64(_iter621); + oprot.writeI64(_iter637); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set622 = new 
org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set622.size); - long _elem623; - for (int _i624 = 0; _i624 < _set622.size; ++_i624) + org.apache.thrift.protocol.TSet _set638 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set638.size); + long _elem639; + for (int _i640 = 0; _i640 < _set638.size; ++_i640) { - _elem623 = iprot.readI64(); - struct.aborted.add(_elem623); + _elem639 = iprot.readI64(); + struct.aborted.add(_elem639); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set625 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set625.size); - long _elem626; - for (int _i627 = 0; _i627 < _set625.size; ++_i627) + org.apache.thrift.protocol.TSet _set641 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set641.size); + long _elem642; + for (int _i643 = 0; _i643 < _set641.size; ++_i643) { - _elem626 = iprot.readI64(); - struct.nosuch.add(_elem626); + _elem642 = iprot.readI64(); + struct.nosuch.add(_elem642); } } struct.setNosuchIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index 85272ddc24..98647b0027 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -538,13 +538,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list670 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list670.size); - String _elem671; - for (int _i672 = 0; _i672 < _list670.size; ++_i672) + org.apache.thrift.protocol.TList _list686 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list686.size); + String _elem687; + for (int _i688 = 0; _i688 < _list686.size; ++_i688) { - _elem671 = iprot.readString(); - struct.filesAdded.add(_elem671); + _elem687 = iprot.readString(); + struct.filesAdded.add(_elem687); } iprot.readListEnd(); } @@ -556,13 +556,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list673 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list673.size); - String _elem674; - for (int _i675 = 0; _i675 < _list673.size; ++_i675) + org.apache.thrift.protocol.TList _list689 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list689.size); + String _elem690; + for (int _i691 = 0; _i691 < _list689.size; ++_i691) { - _elem674 = iprot.readString(); - struct.filesAddedChecksum.add(_elem674); + _elem690 = iprot.readString(); + struct.filesAddedChecksum.add(_elem690); } iprot.readListEnd(); } @@ -593,9 +593,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter676 : struct.filesAdded) + for (String _iter692 : struct.filesAdded) { - oprot.writeString(_iter676); + oprot.writeString(_iter692); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter677 : struct.filesAddedChecksum) + for (String _iter693 : struct.filesAddedChecksum) { - oprot.writeString(_iter677); + oprot.writeString(_iter693); } oprot.writeListEnd(); } @@ -634,9 +634,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter678 : struct.filesAdded) + for (String _iter694 : struct.filesAdded) { - oprot.writeString(_iter678); + oprot.writeString(_iter694); } } BitSet optionals = new BitSet(); @@ -653,9 +653,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter679 : struct.filesAddedChecksum) + for (String _iter695 : struct.filesAddedChecksum) { - oprot.writeString(_iter679); + oprot.writeString(_iter695); } } } @@ -665,13 +665,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list680 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list680.size); - String _elem681; - for (int _i682 = 0; _i682 < _list680.size; ++_i682) + org.apache.thrift.protocol.TList _list696 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list696.size); + String _elem697; + for (int _i698 = 0; _i698 < _list696.size; ++_i698) { - _elem681 = iprot.readString(); - struct.filesAdded.add(_elem681); + _elem697 = iprot.readString(); + struct.filesAdded.add(_elem697); } } struct.setFilesAddedIsSet(true); @@ -682,13 +682,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list683.size); - String _elem684; - for (int _i685 = 0; _i685 < _list683.size; ++_i685) + org.apache.thrift.protocol.TList _list699 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list699.size); + String _elem700; + for (int _i701 = 0; _i701 < _list699.size; ++_i701) { - _elem684 = iprot.readString(); - struct.filesAddedChecksum.add(_elem684); + _elem700 = iprot.readString(); + struct.filesAddedChecksum.add(_elem700); } } struct.setFilesAddedChecksumIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index cfdd0bdf76..64bf58cf1d 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list596 = iprot.readListBegin(); - struct.component = new ArrayList(_list596.size); - LockComponent _elem597; - for (int _i598 = 0; _i598 < _list596.size; ++_i598) + org.apache.thrift.protocol.TList _list612 = iprot.readListBegin(); + struct.component = new ArrayList(_list612.size); + LockComponent _elem613; + for (int _i614 = 0; _i614 < _list612.size; ++_i614) { - _elem597 = new LockComponent(); - _elem597.read(iprot); - struct.component.add(_elem597); + _elem613 = new LockComponent(); + _elem613.read(iprot); + struct.component.add(_elem613); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter599 : struct.component) + for (LockComponent _iter615 : struct.component) { - _iter599.write(oprot); + _iter615.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter600 : struct.component) + for (LockComponent _iter616 : struct.component) { - _iter600.write(oprot); + _iter616.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list601 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list601.size); - LockComponent _elem602; - for (int _i603 = 0; _i603 < _list601.size; ++_i603) + org.apache.thrift.protocol.TList _list617 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list617.size); + LockComponent _elem618; + for (int _i619 = 0; _i619 < _list617.size; ++_i619) { - _elem602 = new LockComponent(); - _elem602.read(iprot); - struct.component.add(_elem602); + _elem618 = new LockComponent(); + _elem618.read(iprot); + struct.component.add(_elem618); } } struct.setComponentIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java index c91b9cfea9..d94a92975a 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java @@ -518,13 +518,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, Materialization str case 1: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set786 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set786.size); - String _elem787; - for (int _i788 = 0; _i788 < _set786.size; ++_i788) + org.apache.thrift.protocol.TSet _set802 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set802.size); + String _elem803; + for (int _i804 = 0; _i804 < _set802.size; ++_i804) { - _elem787 = iprot.readString(); - struct.tablesUsed.add(_elem787); + _elem803 = iprot.readString(); + struct.tablesUsed.add(_elem803); } iprot.readSetEnd(); } @@ -566,9 +566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization st oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter789 : struct.tablesUsed) + for (String _iter805 : struct.tablesUsed) { - oprot.writeString(_iter789); + oprot.writeString(_iter805); } oprot.writeSetEnd(); } @@ -603,9 +603,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter790 : struct.tablesUsed) + for (String _iter806 : struct.tablesUsed) { - oprot.writeString(_iter790); + oprot.writeString(_iter806); } } oprot.writeI64(struct.invalidationTime); @@ -623,13 +623,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str public void read(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set791 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set791.size); - String _elem792; - for (int _i793 = 0; _i793 < _set791.size; ++_i793) + org.apache.thrift.protocol.TSet _set807 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set807.size); + String _elem808; + for (int _i809 = 0; _i809 < _set807.size; ++_i809) { - _elem792 = iprot.readString(); - struct.tablesUsed.add(_elem792); + _elem808 = iprot.readString(); + struct.tablesUsed.add(_elem808); } } struct.setTablesUsedIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index 549c14b119..3405ecc2c3 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list662 = iprot.readListBegin(); - struct.events = new ArrayList(_list662.size); - NotificationEvent _elem663; - for (int _i664 = 0; _i664 < _list662.size; ++_i664) + org.apache.thrift.protocol.TList _list678 = iprot.readListBegin(); + struct.events = new ArrayList(_list678.size); + 
NotificationEvent _elem679; + for (int _i680 = 0; _i680 < _list678.size; ++_i680) { - _elem663 = new NotificationEvent(); - _elem663.read(iprot); - struct.events.add(_elem663); + _elem679 = new NotificationEvent(); + _elem679.read(iprot); + struct.events.add(_elem679); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter665 : struct.events) + for (NotificationEvent _iter681 : struct.events) { - _iter665.write(oprot); + _iter681.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter666 : struct.events) + for (NotificationEvent _iter682 : struct.events) { - _iter666.write(oprot); + _iter682.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list667.size); - NotificationEvent _elem668; - for (int _i669 = 0; _i669 < _list667.size; ++_i669) + org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list683.size); + NotificationEvent _elem684; + for (int _i685 = 0; _i685 < _list683.size; ++_i685) { - _elem668 = new NotificationEvent(); - _elem668.read(iprot); - struct.events.add(_elem668); + _elem684 = new NotificationEvent(); + _elem684.read(iprot); + struct.events.add(_elem684); } } struct.setEventsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java index ee7ae396f1..672c6886c4 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list540 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list540.size); - long _elem541; - for (int _i542 = 0; _i542 < _list540.size; ++_i542) + org.apache.thrift.protocol.TList _list556 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list556.size); + long _elem557; + for (int _i558 = 0; _i558 < _list556.size; ++_i558) { - _elem541 = iprot.readI64(); - struct.txn_ids.add(_elem541); + _elem557 = iprot.readI64(); + struct.txn_ids.add(_elem557); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter543 : struct.txn_ids) + for (long _iter559 : struct.txn_ids) { - oprot.writeI64(_iter543); + oprot.writeI64(_iter559); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter544 : struct.txn_ids) + for (long _iter560 : struct.txn_ids) { - oprot.writeI64(_iter544); + oprot.writeI64(_iter560); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list545.size); - long _elem546; - for (int _i547 = 0; _i547 < _list545.size; ++_i547) + org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list561.size); + long _elem562; + for (int _i563 = 0; _i563 < _list561.size; ++_i563) { - _elem546 = iprot.readI64(); - struct.txn_ids.add(_elem546); + _elem562 = iprot.readI64(); + struct.txn_ids.add(_elem562); } } struct.setTxn_idsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index 2283c24e0c..93a5034606 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -961,14 +961,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 3: // PARTITION_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list484 = iprot.readListBegin(); - struct.partitionKeys = new ArrayList(_list484.size); - FieldSchema _elem485; - for (int _i486 = 0; _i486 < _list484.size; ++_i486) + org.apache.thrift.protocol.TList _list500 = iprot.readListBegin(); + struct.partitionKeys = new ArrayList(_list500.size); + FieldSchema _elem501; + for (int _i502 = 0; _i502 < _list500.size; ++_i502) { - _elem485 = new FieldSchema(); - _elem485.read(iprot); - struct.partitionKeys.add(_elem485); + _elem501 = new FieldSchema(); + _elem501.read(iprot); + struct.partitionKeys.add(_elem501); } iprot.readListEnd(); } @@ -996,14 +996,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 6: // PARTITION_ORDER if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list487 = iprot.readListBegin(); - struct.partitionOrder = new ArrayList(_list487.size); - FieldSchema _elem488; - for (int _i489 = 0; _i489 < _list487.size; ++_i489) + org.apache.thrift.protocol.TList _list503 = iprot.readListBegin(); + struct.partitionOrder = new ArrayList(_list503.size); + FieldSchema _elem504; + for (int _i505 = 0; _i505 < _list503.size; ++_i505) { - _elem488 = new FieldSchema(); - _elem488.read(iprot); - 
struct.partitionOrder.add(_elem488); + _elem504 = new FieldSchema(); + _elem504.read(iprot); + struct.partitionOrder.add(_elem504); } iprot.readListEnd(); } @@ -1055,9 +1055,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size())); - for (FieldSchema _iter490 : struct.partitionKeys) + for (FieldSchema _iter506 : struct.partitionKeys) { - _iter490.write(oprot); + _iter506.write(oprot); } oprot.writeListEnd(); } @@ -1080,9 +1080,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_ORDER_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionOrder.size())); - for (FieldSchema _iter491 : struct.partitionOrder) + for (FieldSchema _iter507 : struct.partitionOrder) { - _iter491.write(oprot); + _iter507.write(oprot); } oprot.writeListEnd(); } @@ -1120,9 +1120,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.partitionKeys.size()); - for (FieldSchema _iter492 : struct.partitionKeys) + for (FieldSchema _iter508 : struct.partitionKeys) { - _iter492.write(oprot); + _iter508.write(oprot); } } BitSet optionals = new BitSet(); @@ -1151,9 +1151,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetPartitionOrder()) { { oprot.writeI32(struct.partitionOrder.size()); - for (FieldSchema _iter493 : struct.partitionOrder) + for (FieldSchema _iter509 : struct.partitionOrder) { - _iter493.write(oprot); + _iter509.write(oprot); } } } @@ -1173,14 +1173,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list494 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionKeys = new ArrayList(_list494.size); - FieldSchema _elem495; - for (int _i496 = 0; _i496 < _list494.size; ++_i496) + org.apache.thrift.protocol.TList _list510 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionKeys = new ArrayList(_list510.size); + FieldSchema _elem511; + for (int _i512 = 0; _i512 < _list510.size; ++_i512) { - _elem495 = new FieldSchema(); - _elem495.read(iprot); - struct.partitionKeys.add(_elem495); + _elem511 = new FieldSchema(); + _elem511.read(iprot); + struct.partitionKeys.add(_elem511); } } struct.setPartitionKeysIsSet(true); @@ -1195,14 +1195,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionOrder = new ArrayList(_list497.size); - FieldSchema _elem498; - for (int _i499 = 0; _i499 < _list497.size; ++_i499) + org.apache.thrift.protocol.TList _list513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionOrder = new ArrayList(_list513.size); + FieldSchema _elem514; + for (int _i515 = 0; _i515 < _list513.size; ++_i515) { - _elem498 = new FieldSchema(); - _elem498.read(iprot); - 
struct.partitionOrder.add(_elem498); + _elem514 = new FieldSchema(); + _elem514.read(iprot); + struct.partitionOrder.add(_elem514); } } struct.setPartitionOrderIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java index f551156768..e1e0de4dca 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesResp case 1: // PARTITION_VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list508 = iprot.readListBegin(); - struct.partitionValues = new ArrayList(_list508.size); - PartitionValuesRow _elem509; - for (int _i510 = 0; _i510 < _list508.size; ++_i510) + org.apache.thrift.protocol.TList _list524 = iprot.readListBegin(); + struct.partitionValues = new ArrayList(_list524.size); + PartitionValuesRow _elem525; + for (int _i526 = 0; _i526 < _list524.size; ++_i526) { - _elem509 = new PartitionValuesRow(); - _elem509.read(iprot); - struct.partitionValues.add(_elem509); + _elem525 = new PartitionValuesRow(); + _elem525.read(iprot); + struct.partitionValues.add(_elem525); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRes oprot.writeFieldBegin(PARTITION_VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionValues.size())); - for (PartitionValuesRow _iter511 : struct.partitionValues) + for (PartitionValuesRow _iter527 : struct.partitionValues) { - _iter511.write(oprot); + _iter527.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitionValues.size()); - for (PartitionValuesRow _iter512 : struct.partitionValues) + for (PartitionValuesRow _iter528 : struct.partitionValues) { - _iter512.write(oprot); + _iter528.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionValues = new ArrayList(_list513.size); - PartitionValuesRow _elem514; - for (int _i515 = 0; _i515 < _list513.size; ++_i515) + org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionValues = new ArrayList(_list529.size); + PartitionValuesRow _elem530; + for (int _i531 = 0; _i531 < _list529.size; ++_i531) { - _elem514 = new PartitionValuesRow(); - _elem514.read(iprot); - struct.partitionValues.add(_elem514); + _elem530 = new PartitionValuesRow(); + _elem530.read(iprot); + struct.partitionValues.add(_elem530); } } struct.setPartitionValuesIsSet(true); diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java index 3f3c3b9e4d..e39063f71b 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRow case 1: // ROW if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list500 = iprot.readListBegin(); - struct.row = new ArrayList(_list500.size); - String _elem501; - for (int _i502 = 0; _i502 < _list500.size; ++_i502) + org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); + struct.row = new ArrayList(_list516.size); + String _elem517; + for (int _i518 = 0; _i518 < _list516.size; ++_i518) { - _elem501 = iprot.readString(); - struct.row.add(_elem501); + _elem517 = iprot.readString(); + struct.row.add(_elem517); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRow oprot.writeFieldBegin(ROW_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.row.size())); - for (String _iter503 : struct.row) + for (String _iter519 : struct.row) { - oprot.writeString(_iter503); + oprot.writeString(_iter519); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.row.size()); - for (String _iter504 : struct.row) + for (String _iter520 : struct.row) { - oprot.writeString(_iter504); + oprot.writeString(_iter520); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.row = new ArrayList(_list505.size); - String _elem506; - for (int _i507 = 0; _i507 < _list505.size; ++_i507) + org.apache.thrift.protocol.TList _list521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.row = new ArrayList(_list521.size); + String _elem522; + for (int _i523 = 0; _i523 < _list521.size; ++_i523) { - _elem506 = iprot.readString(); - struct.row.add(_elem506); + _elem522 = iprot.readString(); + struct.row.add(_elem522); } } struct.setRowIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java index 3ccf5ee5cb..1069accfb3 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java @@ -439,14 +439,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRes case 1: // PARTITIONS if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list386 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list386.size); - Partition _elem387; - for (int _i388 = 0; _i388 < _list386.size; ++_i388) + org.apache.thrift.protocol.TList _list402 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list402.size); + Partition _elem403; + for (int _i404 = 0; _i404 < _list402.size; ++_i404) { - _elem387 = new Partition(); - _elem387.read(iprot); - struct.partitions.add(_elem387); + _elem403 = new Partition(); + _elem403.read(iprot); + struct.partitions.add(_elem403); } iprot.readListEnd(); } @@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter389 : struct.partitions) + for (Partition _iter405 : struct.partitions) { - _iter389.write(oprot); + _iter405.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitions.size()); - for (Partition _iter390 : struct.partitions) + for (Partition _iter406 : struct.partitions) { - _iter390.write(oprot); + _iter406.write(oprot); } } oprot.writeBool(struct.hasUnknownPartitions); @@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list391.size); - Partition _elem392; - for (int _i393 = 0; _i393 < _list391.size; ++_i393) + org.apache.thrift.protocol.TList _list407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list407.size); + Partition _elem408; + for (int _i409 = 0; _i409 < _list407.size; ++_i409) { - _elem392 = new Partition(); - _elem392.read(iprot); - struct.partitions.add(_elem392); + _elem408 = new Partition(); + _elem408.read(iprot); + struct.partitions.add(_elem408); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index 9941fa5603..1de1233336 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -639,13 +639,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list428 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list428.size); - String _elem429; - for (int _i430 = 0; _i430 < _list428.size; ++_i430) + org.apache.thrift.protocol.TList _list444 = iprot.readListBegin(); + struct.colNames = new 
ArrayList(_list444.size); + String _elem445; + for (int _i446 = 0; _i446 < _list444.size; ++_i446) { - _elem429 = iprot.readString(); - struct.colNames.add(_elem429); + _elem445 = iprot.readString(); + struct.colNames.add(_elem445); } iprot.readListEnd(); } @@ -657,13 +657,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 4: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list431 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list431.size); - String _elem432; - for (int _i433 = 0; _i433 < _list431.size; ++_i433) + org.apache.thrift.protocol.TList _list447 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list447.size); + String _elem448; + for (int _i449 = 0; _i449 < _list447.size; ++_i449) { - _elem432 = iprot.readString(); - struct.partNames.add(_elem432); + _elem448 = iprot.readString(); + struct.partNames.add(_elem448); } iprot.readListEnd(); } @@ -699,9 +699,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter434 : struct.colNames) + for (String _iter450 : struct.colNames) { - oprot.writeString(_iter434); + oprot.writeString(_iter450); } oprot.writeListEnd(); } @@ -711,9 +711,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter435 : struct.partNames) + for (String _iter451 : struct.partNames) { - oprot.writeString(_iter435); + oprot.writeString(_iter451); } oprot.writeListEnd(); } @@ -740,16 +740,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter436 : struct.colNames) + for (String _iter452 : struct.colNames) { - oprot.writeString(_iter436); + oprot.writeString(_iter452); } } { oprot.writeI32(struct.partNames.size()); - for (String _iter437 : struct.partNames) + for (String _iter453 : struct.partNames) { - oprot.writeString(_iter437); + oprot.writeString(_iter453); } } } @@ -762,24 +762,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list438 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list438.size); - String _elem439; - for (int _i440 = 0; _i440 < _list438.size; ++_i440) + org.apache.thrift.protocol.TList _list454 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list454.size); + String _elem455; + for (int _i456 = 0; _i456 < _list454.size; ++_i456) { - _elem439 = iprot.readString(); - struct.colNames.add(_elem439); + _elem455 = iprot.readString(); + struct.colNames.add(_elem455); } } struct.setColNamesIsSet(true); { - org.apache.thrift.protocol.TList _list441 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list441.size); - String _elem442; - for (int _i443 = 0; _i443 < 
_list441.size; ++_i443) + org.apache.thrift.protocol.TList _list457 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list457.size); + String _elem458; + for (int _i459 = 0; _i459 < _list457.size; ++_i459) { - _elem442 = iprot.readString(); - struct.partNames.add(_elem442); + _elem458 = iprot.readString(); + struct.partNames.add(_elem458); } } struct.setPartNamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index 8a0e5a5e79..c2183e50e4 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -363,26 +363,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu case 1: // PART_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map402 = iprot.readMapBegin(); - struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map402.size); - String _key403; - List _val404; - for (int _i405 = 0; _i405 < _map402.size; ++_i405) + org.apache.thrift.protocol.TMap _map418 = iprot.readMapBegin(); + struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map418.size); + String _key419; + List _val420; + for (int _i421 = 0; _i421 < _map418.size; ++_i421) { - _key403 = iprot.readString(); + _key419 = iprot.readString(); { - org.apache.thrift.protocol.TList _list406 = iprot.readListBegin(); - _val404 = new ArrayList(_list406.size); - ColumnStatisticsObj _elem407; - for (int _i408 = 0; _i408 < _list406.size; ++_i408) + org.apache.thrift.protocol.TList _list422 = iprot.readListBegin(); + _val420 = new ArrayList(_list422.size); + ColumnStatisticsObj _elem423; + for (int _i424 = 0; _i424 < _list422.size; ++_i424) { - _elem407 = new ColumnStatisticsObj(); - _elem407.read(iprot); - _val404.add(_elem407); + _elem423 = new ColumnStatisticsObj(); + _elem423.read(iprot); + _val420.add(_elem423); } iprot.readListEnd(); } - struct.partStats.put(_key403, _val404); + struct.partStats.put(_key419, _val420); } iprot.readMapEnd(); } @@ -408,14 +408,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes oprot.writeFieldBegin(PART_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size())); - for (Map.Entry<String, List<ColumnStatisticsObj>> _iter409 : struct.partStats.entrySet()) + for (Map.Entry<String, List<ColumnStatisticsObj>> _iter425 : struct.partStats.entrySet()) { - oprot.writeString(_iter409.getKey()); + oprot.writeString(_iter425.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter409.getValue().size())); - for (ColumnStatisticsObj _iter410 : _iter409.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter425.getValue().size())); + for (ColumnStatisticsObj _iter426 : _iter425.getValue()) { - _iter410.write(oprot); + _iter426.write(oprot); } oprot.writeListEnd(); } @@ -443,14 +443,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partStats.size()); - for (Map.Entry<String, List<ColumnStatisticsObj>> _iter411 :
struct.partStats.entrySet()) + for (Map.Entry<String, List<ColumnStatisticsObj>> _iter427 : struct.partStats.entrySet()) { - oprot.writeString(_iter411.getKey()); + oprot.writeString(_iter427.getKey()); { - oprot.writeI32(_iter411.getValue().size()); - for (ColumnStatisticsObj _iter412 : _iter411.getValue()) + oprot.writeI32(_iter427.getValue().size()); + for (ColumnStatisticsObj _iter428 : _iter427.getValue()) { - _iter412.write(oprot); + _iter428.write(oprot); } } } @@ -461,25 +461,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map413 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map413.size); - String _key414; - List _val415; - for (int _i416 = 0; _i416 < _map413.size; ++_i416) + org.apache.thrift.protocol.TMap _map429 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map429.size); + String _key430; + List _val431; + for (int _i432 = 0; _i432 < _map429.size; ++_i432) { - _key414 = iprot.readString(); + _key430 = iprot.readString(); { - org.apache.thrift.protocol.TList _list417 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val415 = new ArrayList(_list417.size); - ColumnStatisticsObj _elem418; - for (int _i419 = 0; _i419 < _list417.size; ++_i419) + org.apache.thrift.protocol.TList _list433 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val431 = new ArrayList(_list433.size); + ColumnStatisticsObj _elem434; + for (int _i435 = 0; _i435 < _list433.size; ++_i435) { - _elem418 = new ColumnStatisticsObj(); - _elem418.read(iprot); - _val415.add(_elem418); + _elem434 = new ColumnStatisticsObj(); + _elem434.read(iprot); + _val431.add(_elem434); } } - struct.partStats.put(_key414, _val415); + struct.partStats.put(_key430, _val431); } } struct.setPartStatsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index e4089c5f27..ba2b320d73 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list730.size); - long _elem731; - for (int _i732 = 0; _i732 < _list730.size; ++_i732) + org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list746.size); + long _elem747; + for (int _i748 = 0; _i748 < _list746.size; ++_i748) { - _elem731 = iprot.readI64(); - struct.fileIds.add(_elem731); + _elem747 = iprot.readI64(); + struct.fileIds.add(_elem747); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void
read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list733 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list733.size); - ByteBuffer _elem734; - for (int _i735 = 0; _i735 < _list733.size; ++_i735) + org.apache.thrift.protocol.TList _list749 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list749.size); + ByteBuffer _elem750; + for (int _i751 = 0; _i751 < _list749.size; ++_i751) { - _elem734 = iprot.readBinary(); - struct.metadata.add(_elem734); + _elem750 = iprot.readBinary(); + struct.metadata.add(_elem750); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter736 : struct.fileIds) + for (long _iter752 : struct.fileIds) { - oprot.writeI64(_iter736); + oprot.writeI64(_iter752); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter737 : struct.metadata) + for (ByteBuffer _iter753 : struct.metadata) { - oprot.writeBinary(_iter737); + oprot.writeBinary(_iter753); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter738 : struct.fileIds) + for (long _iter754 : struct.fileIds) { - oprot.writeI64(_iter738); + oprot.writeI64(_iter754); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter739 : struct.metadata) + for (ByteBuffer _iter755 : struct.metadata) { - oprot.writeBinary(_iter739); + oprot.writeBinary(_iter755); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list740 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list740.size); - long _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list756 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list756.size); + long _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem741 = iprot.readI64(); - struct.fileIds.add(_elem741); + _elem757 = iprot.readI64(); + struct.fileIds.add(_elem757); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list743.size); - ByteBuffer _elem744; - for (int _i745 = 0; _i745 < _list743.size; ++_i745) + org.apache.thrift.protocol.TList _list759 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new 
ArrayList(_list759.size); + ByteBuffer _elem760; + for (int _i761 = 0; _i761 < _list759.size; ++_i761) { - _elem744 = iprot.readBinary(); - struct.metadata.add(_elem744); + _elem760 = iprot.readBinary(); + struct.metadata.add(_elem760); } } struct.setMetadataIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java index d1b52476e4..96fd264bcd 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java @@ -168,13 +168,13 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == NAMES_FIELD_DESC.type) { List names; { - org.apache.thrift.protocol.TList _list468 = iprot.readListBegin(); - names = new ArrayList(_list468.size); - String _elem469; - for (int _i470 = 0; _i470 < _list468.size; ++_i470) + org.apache.thrift.protocol.TList _list484 = iprot.readListBegin(); + names = new ArrayList(_list484.size); + String _elem485; + for (int _i486 = 0; _i486 < _list484.size; ++_i486) { - _elem469 = iprot.readString(); - names.add(_elem469); + _elem485 = iprot.readString(); + names.add(_elem485); } iprot.readListEnd(); } @@ -187,14 +187,14 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == EXPRS_FIELD_DESC.type) { List exprs; { - org.apache.thrift.protocol.TList _list471 = iprot.readListBegin(); - exprs = new ArrayList(_list471.size); - DropPartitionsExpr _elem472; - for (int _i473 = 0; _i473 < _list471.size; ++_i473) + org.apache.thrift.protocol.TList _list487 = iprot.readListBegin(); + exprs = new ArrayList(_list487.size); + DropPartitionsExpr _elem488; + for (int _i489 = 0; _i489 < _list487.size; ++_i489) { - _elem472 = new DropPartitionsExpr(); - _elem472.read(iprot); - exprs.add(_elem472); + _elem488 = new DropPartitionsExpr(); + _elem488.read(iprot); + exprs.add(_elem488); } iprot.readListEnd(); } @@ -219,9 +219,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter474 : names) + for (String _iter490 : names) { - oprot.writeString(_iter474); + oprot.writeString(_iter490); } oprot.writeListEnd(); } @@ -230,9 +230,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter475 : exprs) + for (DropPartitionsExpr _iter491 : exprs) { - _iter475.write(oprot); + _iter491.write(oprot); } oprot.writeListEnd(); } @@ -250,13 +250,13 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case NAMES: List names; { - org.apache.thrift.protocol.TList _list476 = iprot.readListBegin(); - names = new ArrayList(_list476.size); - String _elem477; - for (int _i478 = 0; _i478 < _list476.size; ++_i478) + org.apache.thrift.protocol.TList _list492 = iprot.readListBegin(); + names = new ArrayList(_list492.size); + String _elem493; + for (int _i494 = 0; _i494 < _list492.size; ++_i494) { - _elem477 = iprot.readString(); - names.add(_elem477); + _elem493 = 
iprot.readString(); + names.add(_elem493); } iprot.readListEnd(); } @@ -264,14 +264,14 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case EXPRS: List exprs; { - org.apache.thrift.protocol.TList _list479 = iprot.readListBegin(); - exprs = new ArrayList(_list479.size); - DropPartitionsExpr _elem480; - for (int _i481 = 0; _i481 < _list479.size; ++_i481) + org.apache.thrift.protocol.TList _list495 = iprot.readListBegin(); + exprs = new ArrayList(_list495.size); + DropPartitionsExpr _elem496; + for (int _i497 = 0; _i497 < _list495.size; ++_i497) { - _elem480 = new DropPartitionsExpr(); - _elem480.read(iprot); - exprs.add(_elem480); + _elem496 = new DropPartitionsExpr(); + _elem496.read(iprot); + exprs.add(_elem496); } iprot.readListEnd(); } @@ -291,9 +291,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter482 : names) + for (String _iter498 : names) { - oprot.writeString(_iter482); + oprot.writeString(_iter498); } oprot.writeListEnd(); } @@ -302,9 +302,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter483 : exprs) + for (DropPartitionsExpr _iter499 : exprs) { - _iter483.write(oprot); + _iter499.write(oprot); } oprot.writeListEnd(); } diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java new file mode 100644 index 0000000000..185b77ed21 --- /dev/null +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java @@ -0,0 +1,1109 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLDefaultConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct 
STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLDefaultConstraint"); + + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_value", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new SQLDefaultConstraintStandardSchemeFactory()); + schemes.put(TupleScheme.class, new SQLDefaultConstraintTupleSchemeFactory()); + } + + private String table_db; // required + private String table_name; // required + private String column_name; // required + private String default_value; // required + private String dc_name; // required + private boolean enable_cstr; // required + private boolean validate_cstr; // required + private boolean rely_cstr; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_DB((short)1, "table_db"), + TABLE_NAME((short)2, "table_name"), + COLUMN_NAME((short)3, "column_name"), + DEFAULT_VALUE((short)4, "default_value"), + DC_NAME((short)5, "dc_name"), + ENABLE_CSTR((short)6, "enable_cstr"), + VALIDATE_CSTR((short)7, "validate_cstr"), + RELY_CSTR((short)8, "rely_cstr"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_DB + return TABLE_DB; + case 2: // TABLE_NAME + return TABLE_NAME; + case 3: // COLUMN_NAME + return COLUMN_NAME; + case 4: // DEFAULT_VALUE + return DEFAULT_VALUE; + case 5: // DC_NAME + return DC_NAME; + case 6: // ENABLE_CSTR + return ENABLE_CSTR; + case 7: // VALIDATE_CSTR + return VALIDATE_CSTR; + case 8: // RELY_CSTR + return RELY_CSTR; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found.
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ENABLE_CSTR_ISSET_ID = 0; + private static final int __VALIDATE_CSTR_ISSET_ID = 1; + private static final int __RELY_CSTR_ISSET_ID = 2; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("column_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DEFAULT_VALUE, new org.apache.thrift.meta_data.FieldMetaData("default_value", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DC_NAME, new org.apache.thrift.meta_data.FieldMetaData("dc_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ENABLE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("enable_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.VALIDATE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("validate_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLDefaultConstraint.class, metaDataMap); + } + + public SQLDefaultConstraint() { + } + + public SQLDefaultConstraint( + String table_db, + String table_name, + String column_name, + String default_value, + String dc_name, + boolean enable_cstr, + boolean validate_cstr, + boolean rely_cstr) + { + this(); + this.table_db = table_db; + this.table_name 
= table_name; + this.column_name = column_name; + this.default_value = default_value; + this.dc_name = dc_name; + this.enable_cstr = enable_cstr; + setEnable_cstrIsSet(true); + this.validate_cstr = validate_cstr; + setValidate_cstrIsSet(true); + this.rely_cstr = rely_cstr; + setRely_cstrIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public SQLDefaultConstraint(SQLDefaultConstraint other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetTable_db()) { + this.table_db = other.table_db; + } + if (other.isSetTable_name()) { + this.table_name = other.table_name; + } + if (other.isSetColumn_name()) { + this.column_name = other.column_name; + } + if (other.isSetDefault_value()) { + this.default_value = other.default_value; + } + if (other.isSetDc_name()) { + this.dc_name = other.dc_name; + } + this.enable_cstr = other.enable_cstr; + this.validate_cstr = other.validate_cstr; + this.rely_cstr = other.rely_cstr; + } + + public SQLDefaultConstraint deepCopy() { + return new SQLDefaultConstraint(this); + } + + @Override + public void clear() { + this.table_db = null; + this.table_name = null; + this.column_name = null; + this.default_value = null; + this.dc_name = null; + setEnable_cstrIsSet(false); + this.enable_cstr = false; + setValidate_cstrIsSet(false); + this.validate_cstr = false; + setRely_cstrIsSet(false); + this.rely_cstr = false; + } + + public String getTable_db() { + return this.table_db; + } + + public void setTable_db(String table_db) { + this.table_db = table_db; + } + + public void unsetTable_db() { + this.table_db = null; + } + + /** Returns true if field table_db is set (has been assigned a value) and false otherwise */ + public boolean isSetTable_db() { + return this.table_db != null; + } + + public void setTable_dbIsSet(boolean value) { + if (!value) { + this.table_db = null; + } + } + + public String getTable_name() { + return this.table_name; + } + + public void setTable_name(String table_name) { + this.table_name = table_name; + } + + public void unsetTable_name() { + this.table_name = null; + } + + /** Returns true if field table_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTable_name() { + return this.table_name != null; + } + + public void setTable_nameIsSet(boolean value) { + if (!value) { + this.table_name = null; + } + } + + public String getColumn_name() { + return this.column_name; + } + + public void setColumn_name(String column_name) { + this.column_name = column_name; + } + + public void unsetColumn_name() { + this.column_name = null; + } + + /** Returns true if field column_name is set (has been assigned a value) and false otherwise */ + public boolean isSetColumn_name() { + return this.column_name != null; + } + + public void setColumn_nameIsSet(boolean value) { + if (!value) { + this.column_name = null; + } + } + + public String getDefault_value() { + return this.default_value; + } + + public void setDefault_value(String default_value) { + this.default_value = default_value; + } + + public void unsetDefault_value() { + this.default_value = null; + } + + /** Returns true if field default_value is set (has been assigned a value) and false otherwise */ + public boolean isSetDefault_value() { + return this.default_value != null; + } + + public void setDefault_valueIsSet(boolean value) { + if (!value) { + this.default_value = null; + } + } + + public String getDc_name() { + return this.dc_name; + } + + public void setDc_name(String dc_name) { + this.dc_name = dc_name; + } + + public void 
unsetDc_name() { + this.dc_name = null; + } + + /** Returns true if field dc_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDc_name() { + return this.dc_name != null; + } + + public void setDc_nameIsSet(boolean value) { + if (!value) { + this.dc_name = null; + } + } + + public boolean isEnable_cstr() { + return this.enable_cstr; + } + + public void setEnable_cstr(boolean enable_cstr) { + this.enable_cstr = enable_cstr; + setEnable_cstrIsSet(true); + } + + public void unsetEnable_cstr() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID); + } + + /** Returns true if field enable_cstr is set (has been assigned a value) and false otherwise */ + public boolean isSetEnable_cstr() { + return EncodingUtils.testBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID); + } + + public void setEnable_cstrIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID, value); + } + + public boolean isValidate_cstr() { + return this.validate_cstr; + } + + public void setValidate_cstr(boolean validate_cstr) { + this.validate_cstr = validate_cstr; + setValidate_cstrIsSet(true); + } + + public void unsetValidate_cstr() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID); + } + + /** Returns true if field validate_cstr is set (has been assigned a value) and false otherwise */ + public boolean isSetValidate_cstr() { + return EncodingUtils.testBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID); + } + + public void setValidate_cstrIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID, value); + } + + public boolean isRely_cstr() { + return this.rely_cstr; + } + + public void setRely_cstr(boolean rely_cstr) { + this.rely_cstr = rely_cstr; + setRely_cstrIsSet(true); + } + + public void unsetRely_cstr() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RELY_CSTR_ISSET_ID); + } + + /** Returns true if field rely_cstr is set (has been assigned a value) and false otherwise */ + public boolean isSetRely_cstr() { + return EncodingUtils.testBit(__isset_bitfield, __RELY_CSTR_ISSET_ID); + } + + public void setRely_cstrIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_DB: + if (value == null) { + unsetTable_db(); + } else { + setTable_db((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTable_name(); + } else { + setTable_name((String)value); + } + break; + + case COLUMN_NAME: + if (value == null) { + unsetColumn_name(); + } else { + setColumn_name((String)value); + } + break; + + case DEFAULT_VALUE: + if (value == null) { + unsetDefault_value(); + } else { + setDefault_value((String)value); + } + break; + + case DC_NAME: + if (value == null) { + unsetDc_name(); + } else { + setDc_name((String)value); + } + break; + + case ENABLE_CSTR: + if (value == null) { + unsetEnable_cstr(); + } else { + setEnable_cstr((Boolean)value); + } + break; + + case VALIDATE_CSTR: + if (value == null) { + unsetValidate_cstr(); + } else { + setValidate_cstr((Boolean)value); + } + break; + + case RELY_CSTR: + if (value == null) { + unsetRely_cstr(); + } else { + setRely_cstr((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_DB: + return getTable_db(); + + case TABLE_NAME: + return 
getTable_name(); + + case COLUMN_NAME: + return getColumn_name(); + + case DEFAULT_VALUE: + return getDefault_value(); + + case DC_NAME: + return getDc_name(); + + case ENABLE_CSTR: + return isEnable_cstr(); + + case VALIDATE_CSTR: + return isValidate_cstr(); + + case RELY_CSTR: + return isRely_cstr(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_DB: + return isSetTable_db(); + case TABLE_NAME: + return isSetTable_name(); + case COLUMN_NAME: + return isSetColumn_name(); + case DEFAULT_VALUE: + return isSetDefault_value(); + case DC_NAME: + return isSetDc_name(); + case ENABLE_CSTR: + return isSetEnable_cstr(); + case VALIDATE_CSTR: + return isSetValidate_cstr(); + case RELY_CSTR: + return isSetRely_cstr(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof SQLDefaultConstraint) + return this.equals((SQLDefaultConstraint)that); + return false; + } + + public boolean equals(SQLDefaultConstraint that) { + if (that == null) + return false; + + boolean this_present_table_db = true && this.isSetTable_db(); + boolean that_present_table_db = true && that.isSetTable_db(); + if (this_present_table_db || that_present_table_db) { + if (!(this_present_table_db && that_present_table_db)) + return false; + if (!this.table_db.equals(that.table_db)) + return false; + } + + boolean this_present_table_name = true && this.isSetTable_name(); + boolean that_present_table_name = true && that.isSetTable_name(); + if (this_present_table_name || that_present_table_name) { + if (!(this_present_table_name && that_present_table_name)) + return false; + if (!this.table_name.equals(that.table_name)) + return false; + } + + boolean this_present_column_name = true && this.isSetColumn_name(); + boolean that_present_column_name = true && that.isSetColumn_name(); + if (this_present_column_name || that_present_column_name) { + if (!(this_present_column_name && that_present_column_name)) + return false; + if (!this.column_name.equals(that.column_name)) + return false; + } + + boolean this_present_default_value = true && this.isSetDefault_value(); + boolean that_present_default_value = true && that.isSetDefault_value(); + if (this_present_default_value || that_present_default_value) { + if (!(this_present_default_value && that_present_default_value)) + return false; + if (!this.default_value.equals(that.default_value)) + return false; + } + + boolean this_present_dc_name = true && this.isSetDc_name(); + boolean that_present_dc_name = true && that.isSetDc_name(); + if (this_present_dc_name || that_present_dc_name) { + if (!(this_present_dc_name && that_present_dc_name)) + return false; + if (!this.dc_name.equals(that.dc_name)) + return false; + } + + boolean this_present_enable_cstr = true; + boolean that_present_enable_cstr = true; + if (this_present_enable_cstr || that_present_enable_cstr) { + if (!(this_present_enable_cstr && that_present_enable_cstr)) + return false; + if (this.enable_cstr != that.enable_cstr) + return false; + } + + boolean this_present_validate_cstr = true; + boolean that_present_validate_cstr = true; + if (this_present_validate_cstr || that_present_validate_cstr) { + if (!(this_present_validate_cstr && that_present_validate_cstr)) + return false; + if 
(this.validate_cstr != that.validate_cstr) + return false; + } + + boolean this_present_rely_cstr = true; + boolean that_present_rely_cstr = true; + if (this_present_rely_cstr || that_present_rely_cstr) { + if (!(this_present_rely_cstr && that_present_rely_cstr)) + return false; + if (this.rely_cstr != that.rely_cstr) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_table_db = true && (isSetTable_db()); + list.add(present_table_db); + if (present_table_db) + list.add(table_db); + + boolean present_table_name = true && (isSetTable_name()); + list.add(present_table_name); + if (present_table_name) + list.add(table_name); + + boolean present_column_name = true && (isSetColumn_name()); + list.add(present_column_name); + if (present_column_name) + list.add(column_name); + + boolean present_default_value = true && (isSetDefault_value()); + list.add(present_default_value); + if (present_default_value) + list.add(default_value); + + boolean present_dc_name = true && (isSetDc_name()); + list.add(present_dc_name); + if (present_dc_name) + list.add(dc_name); + + boolean present_enable_cstr = true; + list.add(present_enable_cstr); + if (present_enable_cstr) + list.add(enable_cstr); + + boolean present_validate_cstr = true; + list.add(present_validate_cstr); + if (present_validate_cstr) + list.add(validate_cstr); + + boolean present_rely_cstr = true; + list.add(present_rely_cstr); + if (present_rely_cstr) + list.add(rely_cstr); + + return list.hashCode(); + } + + @Override + public int compareTo(SQLDefaultConstraint other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTable_db()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_db, other.table_db); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTable_name()).compareTo(other.isSetTable_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTable_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_name, other.table_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetColumn_name()).compareTo(other.isSetColumn_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetColumn_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_name, other.column_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDefault_value()).compareTo(other.isSetDefault_value()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDefault_value()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.default_value, other.default_value); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDc_name()).compareTo(other.isSetDc_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDc_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dc_name, other.dc_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEnable_cstr()).compareTo(other.isSetEnable_cstr()); + if (lastComparison != 0) { + return 
lastComparison; + } + if (isSetEnable_cstr()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.enable_cstr, other.enable_cstr); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValidate_cstr()).compareTo(other.isSetValidate_cstr()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValidate_cstr()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validate_cstr, other.validate_cstr); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRely_cstr()).compareTo(other.isSetRely_cstr()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRely_cstr()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rely_cstr, other.rely_cstr); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("SQLDefaultConstraint("); + boolean first = true; + + sb.append("table_db:"); + if (this.table_db == null) { + sb.append("null"); + } else { + sb.append(this.table_db); + } + first = false; + if (!first) sb.append(", "); + sb.append("table_name:"); + if (this.table_name == null) { + sb.append("null"); + } else { + sb.append(this.table_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("column_name:"); + if (this.column_name == null) { + sb.append("null"); + } else { + sb.append(this.column_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("default_value:"); + if (this.default_value == null) { + sb.append("null"); + } else { + sb.append(this.default_value); + } + first = false; + if (!first) sb.append(", "); + sb.append("dc_name:"); + if (this.dc_name == null) { + sb.append("null"); + } else { + sb.append(this.dc_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("enable_cstr:"); + sb.append(this.enable_cstr); + first = false; + if (!first) sb.append(", "); + sb.append("validate_cstr:"); + sb.append(this.validate_cstr); + first = false; + if (!first) sb.append(", "); + sb.append("rely_cstr:"); + sb.append(this.rely_cstr); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
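+ // (The reset below clears the primitive-field bits; the subsequent read()
+ // re-marks enable_cstr, validate_cstr and rely_cstr as set while their
+ // boolean values are decoded.)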
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class SQLDefaultConstraintStandardSchemeFactory implements SchemeFactory { + public SQLDefaultConstraintStandardScheme getScheme() { + return new SQLDefaultConstraintStandardScheme(); + } + } + + private static class SQLDefaultConstraintStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_DB + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.table_db = iprot.readString(); + struct.setTable_dbIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.table_name = iprot.readString(); + struct.setTable_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // COLUMN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.column_name = iprot.readString(); + struct.setColumn_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DEFAULT_VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.default_value = iprot.readString(); + struct.setDefault_valueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // DC_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dc_name = iprot.readString(); + struct.setDc_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // ENABLE_CSTR + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.enable_cstr = iprot.readBool(); + struct.setEnable_cstrIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // VALIDATE_CSTR + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.validate_cstr = iprot.readBool(); + struct.setValidate_cstrIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // RELY_CSTR + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.rely_cstr = iprot.readBool(); + struct.setRely_cstrIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.table_db != null) { + oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); + oprot.writeString(struct.table_db); + oprot.writeFieldEnd(); + } + if 
(struct.table_name != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.table_name); + oprot.writeFieldEnd(); + } + if (struct.column_name != null) { + oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC); + oprot.writeString(struct.column_name); + oprot.writeFieldEnd(); + } + if (struct.default_value != null) { + oprot.writeFieldBegin(DEFAULT_VALUE_FIELD_DESC); + oprot.writeString(struct.default_value); + oprot.writeFieldEnd(); + } + if (struct.dc_name != null) { + oprot.writeFieldBegin(DC_NAME_FIELD_DESC); + oprot.writeString(struct.dc_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(ENABLE_CSTR_FIELD_DESC); + oprot.writeBool(struct.enable_cstr); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(VALIDATE_CSTR_FIELD_DESC); + oprot.writeBool(struct.validate_cstr); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC); + oprot.writeBool(struct.rely_cstr); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class SQLDefaultConstraintTupleSchemeFactory implements SchemeFactory { + public SQLDefaultConstraintTupleScheme getScheme() { + return new SQLDefaultConstraintTupleScheme(); + } + } + + private static class SQLDefaultConstraintTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetTable_db()) { + optionals.set(0); + } + if (struct.isSetTable_name()) { + optionals.set(1); + } + if (struct.isSetColumn_name()) { + optionals.set(2); + } + if (struct.isSetDefault_value()) { + optionals.set(3); + } + if (struct.isSetDc_name()) { + optionals.set(4); + } + if (struct.isSetEnable_cstr()) { + optionals.set(5); + } + if (struct.isSetValidate_cstr()) { + optionals.set(6); + } + if (struct.isSetRely_cstr()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); + if (struct.isSetTable_db()) { + oprot.writeString(struct.table_db); + } + if (struct.isSetTable_name()) { + oprot.writeString(struct.table_name); + } + if (struct.isSetColumn_name()) { + oprot.writeString(struct.column_name); + } + if (struct.isSetDefault_value()) { + oprot.writeString(struct.default_value); + } + if (struct.isSetDc_name()) { + oprot.writeString(struct.dc_name); + } + if (struct.isSetEnable_cstr()) { + oprot.writeBool(struct.enable_cstr); + } + if (struct.isSetValidate_cstr()) { + oprot.writeBool(struct.validate_cstr); + } + if (struct.isSetRely_cstr()) { + oprot.writeBool(struct.rely_cstr); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(8); + if (incoming.get(0)) { + struct.table_db = iprot.readString(); + struct.setTable_dbIsSet(true); + } + if (incoming.get(1)) { + struct.table_name = iprot.readString(); + struct.setTable_nameIsSet(true); + } + if (incoming.get(2)) { + struct.column_name = iprot.readString(); + struct.setColumn_nameIsSet(true); + } + if (incoming.get(3)) { + struct.default_value = iprot.readString(); + struct.setDefault_valueIsSet(true); + } + if (incoming.get(4)) { + struct.dc_name = iprot.readString(); + struct.setDc_nameIsSet(true); + } + if (incoming.get(5)) { + struct.enable_cstr = iprot.readBool(); + struct.setEnable_cstrIsSet(true); + } + if (incoming.get(6)) { + 
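+ // bit 6 of the incoming BitSet signals that validate_cstr was serialized
+ // by the tuple scheme's writer (see optionals.set(6) in write() above)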
struct.validate_cstr = iprot.readBool(); + struct.setValidate_cstrIsSet(true); + } + if (incoming.get(7)) { + struct.rely_cstr = iprot.readBool(); + struct.setRely_cstrIsSet(true); + } + } + } + +} + diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index fb7b94e965..bd335efe8e 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list638 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list638.size); - ShowCompactResponseElement _elem639; - for (int _i640 = 0; _i640 < _list638.size; ++_i640) + org.apache.thrift.protocol.TList _list654 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list654.size); + ShowCompactResponseElement _elem655; + for (int _i656 = 0; _i656 < _list654.size; ++_i656) { - _elem639 = new ShowCompactResponseElement(); - _elem639.read(iprot); - struct.compacts.add(_elem639); + _elem655 = new ShowCompactResponseElement(); + _elem655.read(iprot); + struct.compacts.add(_elem655); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter641 : struct.compacts) + for (ShowCompactResponseElement _iter657 : struct.compacts) { - _iter641.write(oprot); + _iter657.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter642 : struct.compacts) + for (ShowCompactResponseElement _iter658 : struct.compacts) { - _iter642.write(oprot); + _iter658.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list643.size); - ShowCompactResponseElement _elem644; - for (int _i645 = 0; _i645 < _list643.size; ++_i645) + org.apache.thrift.protocol.TList _list659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list659.size); + ShowCompactResponseElement _elem660; + for (int _i661 = 0; _i661 < _list659.size; ++_i661) { - _elem644 = new ShowCompactResponseElement(); - _elem644.read(iprot); - struct.compacts.add(_elem644); + _elem660 = new ShowCompactResponseElement(); + _elem660.read(iprot); + struct.compacts.add(_elem660); } } struct.setCompactsIsSet(true); diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index 02dd278fa4..eff942b6b8 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list604 = iprot.readListBegin(); - struct.locks = new ArrayList(_list604.size); - ShowLocksResponseElement _elem605; - for (int _i606 = 0; _i606 < _list604.size; ++_i606) + org.apache.thrift.protocol.TList _list620 = iprot.readListBegin(); + struct.locks = new ArrayList(_list620.size); + ShowLocksResponseElement _elem621; + for (int _i622 = 0; _i622 < _list620.size; ++_i622) { - _elem605 = new ShowLocksResponseElement(); - _elem605.read(iprot); - struct.locks.add(_elem605); + _elem621 = new ShowLocksResponseElement(); + _elem621.read(iprot); + struct.locks.add(_elem621); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter607 : struct.locks) + for (ShowLocksResponseElement _iter623 : struct.locks) { - _iter607.write(oprot); + _iter623.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter608 : struct.locks) + for (ShowLocksResponseElement _iter624 : struct.locks) { - _iter608.write(oprot); + _iter624.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list609 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list609.size); - ShowLocksResponseElement _elem610; - for (int _i611 = 0; _i611 < _list609.size; ++_i611) + org.apache.thrift.protocol.TList _list625 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list625.size); + ShowLocksResponseElement _elem626; + for (int _i627 = 0; _i627 < _list625.size; ++_i627) { - _elem610 = new ShowLocksResponseElement(); - _elem610.read(iprot); - struct.locks.add(_elem610); + _elem626 = new ShowLocksResponseElement(); + _elem626.read(iprot); + struct.locks.add(_elem626); } } struct.setLocksIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index 69be837ef9..ad886b853c 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -537,13 +537,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list420 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list420.size); - String _elem421; - for (int _i422 = 0; _i422 < _list420.size; ++_i422) + org.apache.thrift.protocol.TList _list436 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list436.size); + String _elem437; + for (int _i438 = 0; _i438 < _list436.size; ++_i438) { - _elem421 = iprot.readString(); - struct.colNames.add(_elem421); + _elem437 = iprot.readString(); + struct.colNames.add(_elem437); } iprot.readListEnd(); } @@ -579,9 +579,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter423 : struct.colNames) + for (String _iter439 : struct.colNames) { - oprot.writeString(_iter423); + oprot.writeString(_iter439); } oprot.writeListEnd(); } @@ -608,9 +608,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter424 : struct.colNames) + for (String _iter440 : struct.colNames) { - oprot.writeString(_iter424); + oprot.writeString(_iter440); } } } @@ -623,13 +623,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list425 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list425.size); - String _elem426; - for (int _i427 = 0; _i427 < _list425.size; ++_i427) + org.apache.thrift.protocol.TList _list441 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list441.size); + String _elem442; + for (int _i443 = 0; _i443 < _list441.size; ++_i443) { - _elem426 = iprot.readString(); - struct.colNames.add(_elem426); + _elem442 = iprot.readString(); + struct.colNames.add(_elem442); } } struct.setColNamesIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index e65166ea0e..64af8ba907 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st case 1: // TABLE_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list394 = iprot.readListBegin(); - struct.tableStats = new ArrayList(_list394.size); - ColumnStatisticsObj _elem395; - for (int _i396 = 0; _i396 < _list394.size; ++_i396) + org.apache.thrift.protocol.TList _list410 = iprot.readListBegin(); + struct.tableStats = new ArrayList(_list410.size); + ColumnStatisticsObj _elem411; + for (int 
_i412 = 0; _i412 < _list410.size; ++_i412) { - _elem395 = new ColumnStatisticsObj(); - _elem395.read(iprot); - struct.tableStats.add(_elem395); + _elem411 = new ColumnStatisticsObj(); + _elem411.read(iprot); + struct.tableStats.add(_elem411); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableStats.size())); - for (ColumnStatisticsObj _iter397 : struct.tableStats) + for (ColumnStatisticsObj _iter413 : struct.tableStats) { - _iter397.write(oprot); + _iter413.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tableStats.size()); - for (ColumnStatisticsObj _iter398 : struct.tableStats) + for (ColumnStatisticsObj _iter414 : struct.tableStats) { - _iter398.write(oprot); + _iter414.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tableStats = new ArrayList(_list399.size); - ColumnStatisticsObj _elem400; - for (int _i401 = 0; _i401 < _list399.size; ++_i401) + org.apache.thrift.protocol.TList _list415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableStats = new ArrayList(_list415.size); + ColumnStatisticsObj _elem416; + for (int _i417 = 0; _i417 < _list415.size; ++_i417) { - _elem400 = new ColumnStatisticsObj(); - _elem400.read(iprot); - struct.tableStats.add(_elem400); + _elem416 = new ColumnStatisticsObj(); + _elem416.read(iprot); + struct.tableStats.add(_elem416); } } struct.setTableStatsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java index 1d43fb84a3..3f9b0d9e5b 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java @@ -708,13 +708,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableValidWriteIds case 3: // INVALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); - struct.invalidWriteIds = new ArrayList(_list564.size); - long _elem565; - for (int _i566 = 0; _i566 < _list564.size; ++_i566) + org.apache.thrift.protocol.TList _list580 = iprot.readListBegin(); + struct.invalidWriteIds = new ArrayList(_list580.size); + long _elem581; + for (int _i582 = 0; _i582 < _list580.size; ++_i582) { - _elem565 = iprot.readI64(); - struct.invalidWriteIds.add(_elem565); + _elem581 = iprot.readI64(); + struct.invalidWriteIds.add(_elem581); } iprot.readListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableValidWriteIds 
oprot.writeFieldBegin(INVALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.invalidWriteIds.size())); - for (long _iter567 : struct.invalidWriteIds) + for (long _iter583 : struct.invalidWriteIds) { - oprot.writeI64(_iter567); + oprot.writeI64(_iter583); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds oprot.writeI64(struct.writeIdHighWaterMark); { oprot.writeI32(struct.invalidWriteIds.size()); - for (long _iter568 : struct.invalidWriteIds) + for (long _iter584 : struct.invalidWriteIds) { - oprot.writeI64(_iter568); + oprot.writeI64(_iter584); } } oprot.writeBinary(struct.abortedBits); @@ -827,13 +827,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds s struct.writeIdHighWaterMark = iprot.readI64(); struct.setWriteIdHighWaterMarkIsSet(true); { - org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.invalidWriteIds = new ArrayList(_list569.size); - long _elem570; - for (int _i571 = 0; _i571 < _list569.size; ++_i571) + org.apache.thrift.protocol.TList _list585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.invalidWriteIds = new ArrayList(_list585.size); + long _elem586; + for (int _i587 = 0; _i587 < _list585.size; ++_i587) { - _elem570 = iprot.readI64(); - struct.invalidWriteIds.add(_elem570); + _elem586 = iprot.readI64(); + struct.invalidWriteIds.add(_elem586); } } struct.setInvalidWriteIdsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index adddd077ed..9a97d6b79c 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -78,7 +78,7 @@ public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException; public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; @@ -90,6 +90,8 @@ public void add_not_null_constraint(AddNotNullConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public void add_default_constraint(AddDefaultConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; 
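// A hypothetical caller-side sketch of the new default-constraint RPCs added
// in this patch (not generated code; the AddDefaultConstraintRequest and
// DefaultConstraintsRequest constructors shown are assumptions, while the
// SQLDefaultConstraint constructor matches the one generated above):
//
//   List<SQLDefaultConstraint> dcs = Arrays.asList(new SQLDefaultConstraint(
//       "db", "tbl", "col", "0", "dc_col", true, false, false));
//   client.add_default_constraint(new AddDefaultConstraintRequest(dcs));
//   DefaultConstraintsResponse resp =
//       client.get_default_constraints(new DefaultConstraintsRequest("db", "tbl"));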
public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; @@ -232,6 +234,8 @@ public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; public boolean update_partition_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; @@ -450,7 +454,7 @@ public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -462,6 +466,8 @@ public void add_not_null_constraint(AddNotNullConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void add_default_constraint(AddDefaultConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -604,6 +610,8 @@ public void get_not_null_constraints(NotNullConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_default_constraints(DefaultConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -1336,13 +1344,13 @@ public void recv_create_table_with_environment_context() throws AlreadyExistsExc return; } - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws 
AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException { - send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints); recv_create_table_with_constraints(); } - public void send_create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws org.apache.thrift.TException + public void send_create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints) throws org.apache.thrift.TException { create_table_with_constraints_args args = new create_table_with_constraints_args(); args.setTbl(tbl); @@ -1350,6 +1358,7 @@ public void send_create_table_with_constraints(Table tbl, List pr args.setForeignKeys(foreignKeys); args.setUniqueConstraints(uniqueConstraints); args.setNotNullConstraints(notNullConstraints); + args.setDefaultConstraints(defaultConstraints); sendBase("create_table_with_constraints", args); } @@ -1502,6 +1511,32 @@ public void recv_add_not_null_constraint() throws NoSuchObjectException, MetaExc return; } + public void add_default_constraint(AddDefaultConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + send_add_default_constraint(req); + recv_add_default_constraint(); + } + + public void send_add_default_constraint(AddDefaultConstraintRequest req) throws org.apache.thrift.TException + { + add_default_constraint_args args = new add_default_constraint_args(); + args.setReq(req); + sendBase("add_default_constraint", args); + } + + public void recv_add_default_constraint() throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + add_default_constraint_result result = new add_default_constraint_result(); + receiveBase(result, "add_default_constraint"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + return; + } + public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { send_drop_table(dbname, name, deleteData); @@ -3691,6 +3726,35 @@ public NotNullConstraintsResponse recv_get_not_null_constraints() throws MetaExc throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_not_null_constraints failed: unknown result"); } + public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + send_get_default_constraints(request); + return recv_get_default_constraints(); + } + + public void send_get_default_constraints(DefaultConstraintsRequest request) throws org.apache.thrift.TException + { + get_default_constraints_args args = new get_default_constraints_args(); + args.setRequest(request); + sendBase("get_default_constraints", args); + } + + public DefaultConstraintsResponse recv_get_default_constraints() throws MetaException, NoSuchObjectException, 
org.apache.thrift.TException + { + get_default_constraints_result result = new get_default_constraints_result(); + receiveBase(result, "get_default_constraints"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_default_constraints failed: unknown result"); + } + public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { send_update_table_column_statistics(stats_obj); @@ -6762,9 +6826,9 @@ public void getResult() throws AlreadyExistsException, InvalidObjectException, M } } - public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void create_table_with_constraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - create_table_with_constraints_call method_call = new create_table_with_constraints_call(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, resultHandler, this, ___protocolFactory, ___transport); + create_table_with_constraints_call method_call = new create_table_with_constraints_call(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -6775,13 +6839,15 @@ public void create_table_with_constraints(Table tbl, List primary private List foreignKeys; private List uniqueConstraints; private List notNullConstraints; - public create_table_with_constraints_call(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + private List defaultConstraints; + public create_table_with_constraints_call(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tbl = tbl; this.primaryKeys = primaryKeys; this.foreignKeys = foreignKeys; this.uniqueConstraints = uniqueConstraints; this.notNullConstraints = notNullConstraints; + this.defaultConstraints = defaultConstraints; } public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { @@ -6792,6 +6858,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa args.setForeignKeys(foreignKeys); args.setUniqueConstraints(uniqueConstraints); args.setNotNullConstraints(notNullConstraints); + 
args.setDefaultConstraints(defaultConstraints); args.write(prot); prot.writeMessageEnd(); } @@ -6966,6 +7033,38 @@ public void getResult() throws NoSuchObjectException, MetaException, org.apache. } } + public void add_default_constraint(AddDefaultConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + add_default_constraint_call method_call = new add_default_constraint_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_default_constraint_call extends org.apache.thrift.async.TAsyncMethodCall { + private AddDefaultConstraintRequest req; + public add_default_constraint_call(AddDefaultConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_default_constraint", org.apache.thrift.protocol.TMessageType.CALL, 0)); + add_default_constraint_args args = new add_default_constraint_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_add_default_constraint(); + } + } + public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); drop_table_call method_call = new drop_table_call(dbname, name, deleteData, resultHandler, this, ___protocolFactory, ___transport); @@ -9637,6 +9736,38 @@ public NotNullConstraintsResponse getResult() throws MetaException, NoSuchObject } } + public void get_default_constraints(DefaultConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_default_constraints_call method_call = new get_default_constraints_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_default_constraints_call extends org.apache.thrift.async.TAsyncMethodCall { + private DefaultConstraintsRequest request; + public get_default_constraints_call(DefaultConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_default_constraints", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_default_constraints_args args = new get_default_constraints_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public DefaultConstraintsResponse getResult() throws MetaException, NoSuchObjectException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_default_constraints(); + } + } + public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); update_table_column_statistics_call method_call = new update_table_column_statistics_call(stats_obj, resultHandler, this, ___protocolFactory, ___transport); @@ -12588,6 +12719,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public add_default_constraint() { + super("add_default_constraint"); + } + + public add_default_constraint_args getEmptyArgsInstance() { + return new add_default_constraint_args(); + } + + protected boolean isOneway() { + return false; + } + + public add_default_constraint_result getResult(I iface, add_default_constraint_args args) throws org.apache.thrift.TException { + add_default_constraint_result result = new add_default_constraint_result(); + try { + iface.add_default_constraint(args.req); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table extends org.apache.thrift.ProcessFunction { public drop_table() { super("drop_table"); @@ -15273,6 +15432,32 @@ public get_not_null_constraints_result getResult(I iface, get_not_null_constrain } } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_default_constraints extends org.apache.thrift.ProcessFunction { + public get_default_constraints() { + super("get_default_constraints"); + } + + public get_default_constraints_args getEmptyArgsInstance() { + return new get_default_constraints_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_default_constraints_result getResult(I iface, get_default_constraints_args args) throws org.apache.thrift.TException { + get_default_constraints_result result = new get_default_constraints_result(); + try { + result.success = iface.get_default_constraints(args.request); + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } + return 
result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics extends org.apache.thrift.ProcessFunction { public update_table_column_statistics() { super("update_table_column_statistics"); @@ -17514,6 +17699,7 @@ protected AsyncProcessor(I iface, Map resultHandler) throws TException { - iface.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints,resultHandler); + iface.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints,resultHandler); } } @@ -19195,6 +19382,67 @@ public void start(I iface, add_not_null_constraint_args args, org.apache.thrift. } } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_default_constraint extends org.apache.thrift.AsyncProcessFunction { + public add_default_constraint() { + super("add_default_constraint"); + } + + public add_default_constraint_args getEmptyArgsInstance() { + return new add_default_constraint_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + add_default_constraint_result result = new add_default_constraint_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + add_default_constraint_result result = new add_default_constraint_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, add_default_constraint_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.add_default_constraint(args.req,resultHandler); + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table extends org.apache.thrift.AsyncProcessFunction { public drop_table() { super("drop_table"); @@ -23657,22 +23905,21 @@ public void start(I iface, get_not_null_constraints_args args, org.apache.thrift } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { - public update_table_column_statistics() { - 
super("update_table_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_default_constraints extends org.apache.thrift.AsyncProcessFunction { + public get_default_constraints() { + super("get_default_constraints"); } - public update_table_column_statistics_args getEmptyArgsInstance() { - return new update_table_column_statistics_args(); + public get_default_constraints_args getEmptyArgsInstance() { + return new get_default_constraints_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Boolean o) { - update_table_column_statistics_result result = new update_table_column_statistics_result(); + return new AsyncMethodCallback() { + public void onComplete(DefaultConstraintsResponse o) { + get_default_constraints_result result = new get_default_constraints_result(); result.success = o; - result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -23684,26 +23931,16 @@ public void onComplete(Boolean o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - update_table_column_statistics_result result = new update_table_column_statistics_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; + get_default_constraints_result result = new get_default_constraints_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof InvalidObjectException) { - result.o2 = (InvalidObjectException) e; + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; result.setO2IsSet(true); msg = result; - } - else if (e instanceof MetaException) { - result.o3 = (MetaException) e; - result.setO3IsSet(true); - msg = result; - } - else if (e instanceof InvalidInputException) { - result.o4 = (InvalidInputException) e; - result.setO4IsSet(true); - msg = result; } else { @@ -23725,25 +23962,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, update_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.update_table_column_statistics(args.stats_obj,resultHandler); + public void start(I iface, get_default_constraints_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_default_constraints(args.request,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics extends org.apache.thrift.AsyncProcessFunction { - public update_partition_column_statistics() { - super("update_partition_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public update_table_column_statistics() { + super("update_table_column_statistics"); } - public update_partition_column_statistics_args getEmptyArgsInstance() { - 
return new update_partition_column_statistics_args(); + public update_table_column_statistics_args getEmptyArgsInstance() { + return new update_table_column_statistics_args(); } public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback() { public void onComplete(Boolean o) { - update_partition_column_statistics_result result = new update_partition_column_statistics_result(); + update_table_column_statistics_result result = new update_table_column_statistics_result(); result.success = o; result.setSuccessIsSet(true); try { @@ -23757,7 +23994,7 @@ public void onComplete(Boolean o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - update_partition_column_statistics_result result = new update_partition_column_statistics_result(); + update_table_column_statistics_result result = new update_table_column_statistics_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); @@ -23798,26 +24035,27 @@ protected boolean isOneway() { return false; } - public void start(I iface, update_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.update_partition_column_statistics(args.stats_obj,resultHandler); + public void start(I iface, update_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.update_table_column_statistics(args.stats_obj,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { - public get_table_column_statistics() { - super("get_table_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public update_partition_column_statistics() { + super("update_partition_column_statistics"); } - public get_table_column_statistics_args getEmptyArgsInstance() { - return new get_table_column_statistics_args(); + public update_partition_column_statistics_args getEmptyArgsInstance() { + return new update_partition_column_statistics_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(ColumnStatistics o) { - get_table_column_statistics_result result = new get_table_column_statistics_result(); + return new AsyncMethodCallback() { + public void onComplete(Boolean o) { + update_partition_column_statistics_result result = new update_partition_column_statistics_result(); result.success = o; + result.setSuccessIsSet(true); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -23829,24 +24067,96 @@ public void onComplete(ColumnStatistics o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_table_column_statistics_result result = new 
get_table_column_statistics_result(); + update_partition_column_statistics_result result = new update_partition_column_statistics_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; result.setO2IsSet(true); msg = result; } - else if (e instanceof InvalidInputException) { - result.o3 = (InvalidInputException) e; + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; result.setO3IsSet(true); msg = result; } - else if (e instanceof InvalidObjectException) { - result.o4 = (InvalidObjectException) e; + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, update_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.update_partition_column_statistics(args.stats_obj,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public get_table_column_statistics() { + super("get_table_column_statistics"); + } + + public get_table_column_statistics_args getEmptyArgsInstance() { + return new get_table_column_statistics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(ColumnStatistics o) { + get_table_column_statistics_result result = new get_table_column_statistics_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_table_column_statistics_result result = new get_table_column_statistics_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o3 = (InvalidInputException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o4 = (InvalidObjectException) e; result.setO4IsSet(true); msg = result; } @@ -34354,13 +34664,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
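For the asynchronous path, the get_default_constraints_call defined earlier frames a CALL message in write_args and replays the recv_ logic in getResult(); the processor classes in this region just route the reply into get_default_constraints_result. A sketch of driving it from the generated AsyncClient, assuming a libthrift 0.9.x-style callback whose onComplete receives the finished call object (the exact generic shape of AsyncMethodCallback varies by Thrift version):

    import org.apache.thrift.TException;
    import org.apache.thrift.async.AsyncMethodCallback;

    final class AsyncDefaultConstraintSketch {
      static void fetch(ThriftHiveMetastore.AsyncClient client,
                        DefaultConstraintsRequest request) throws TException {
        client.get_default_constraints(request,
            new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.get_default_constraints_call>() {
              public void onComplete(
                  ThriftHiveMetastore.AsyncClient.get_default_constraints_call call) {
                try {
                  // getResult() re-reads the framed reply, as in the class above.
                  DefaultConstraintsResponse resp = call.getResult();
                } catch (TException e) {
                  // MetaException / NoSuchObjectException both surface as TException here.
                }
              }
              public void onError(Exception e) {
                // Transport- or protocol-level failure.
              }
            });
      }
    }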
org.apache.thrift.protocol.TList _list858 = iprot.readListBegin(); - struct.success = new ArrayList(_list858.size); - String _elem859; - for (int _i860 = 0; _i860 < _list858.size; ++_i860) + org.apache.thrift.protocol.TList _list874 = iprot.readListBegin(); + struct.success = new ArrayList(_list874.size); + String _elem875; + for (int _i876 = 0; _i876 < _list874.size; ++_i876) { - _elem859 = iprot.readString(); - struct.success.add(_elem859); + _elem875 = iprot.readString(); + struct.success.add(_elem875); } iprot.readListEnd(); } @@ -34395,9 +34705,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter861 : struct.success) + for (String _iter877 : struct.success) { - oprot.writeString(_iter861); + oprot.writeString(_iter877); } oprot.writeListEnd(); } @@ -34436,9 +34746,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter862 : struct.success) + for (String _iter878 : struct.success) { - oprot.writeString(_iter862); + oprot.writeString(_iter878); } } } @@ -34453,13 +34763,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list863 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list863.size); - String _elem864; - for (int _i865 = 0; _i865 < _list863.size; ++_i865) + org.apache.thrift.protocol.TList _list879 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list879.size); + String _elem880; + for (int _i881 = 0; _i881 < _list879.size; ++_i881) { - _elem864 = iprot.readString(); - struct.success.add(_elem864); + _elem880 = iprot.readString(); + struct.success.add(_elem880); } } struct.setSuccessIsSet(true); @@ -35113,13 +35423,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list866 = iprot.readListBegin(); - struct.success = new ArrayList(_list866.size); - String _elem867; - for (int _i868 = 0; _i868 < _list866.size; ++_i868) + org.apache.thrift.protocol.TList _list882 = iprot.readListBegin(); + struct.success = new ArrayList(_list882.size); + String _elem883; + for (int _i884 = 0; _i884 < _list882.size; ++_i884) { - _elem867 = iprot.readString(); - struct.success.add(_elem867); + _elem883 = iprot.readString(); + struct.success.add(_elem883); } iprot.readListEnd(); } @@ -35154,9 +35464,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter869 : struct.success) + for (String _iter885 : struct.success) { - oprot.writeString(_iter869); + oprot.writeString(_iter885); } oprot.writeListEnd(); } @@ -35195,9 +35505,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter870 : 
struct.success) + for (String _iter886 : struct.success) { - oprot.writeString(_iter870); + oprot.writeString(_iter886); } } } @@ -35212,13 +35522,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list871 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list871.size); - String _elem872; - for (int _i873 = 0; _i873 < _list871.size; ++_i873) + org.apache.thrift.protocol.TList _list887 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list887.size); + String _elem888; + for (int _i889 = 0; _i889 < _list887.size; ++_i889) { - _elem872 = iprot.readString(); - struct.success.add(_elem872); + _elem888 = iprot.readString(); + struct.success.add(_elem888); } } struct.setSuccessIsSet(true); @@ -39825,16 +40135,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map874 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map874.size); - String _key875; - Type _val876; - for (int _i877 = 0; _i877 < _map874.size; ++_i877) + org.apache.thrift.protocol.TMap _map890 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map890.size); + String _key891; + Type _val892; + for (int _i893 = 0; _i893 < _map890.size; ++_i893) { - _key875 = iprot.readString(); - _val876 = new Type(); - _val876.read(iprot); - struct.success.put(_key875, _val876); + _key891 = iprot.readString(); + _val892 = new Type(); + _val892.read(iprot); + struct.success.put(_key891, _val892); } iprot.readMapEnd(); } @@ -39869,10 +40179,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter878 : struct.success.entrySet()) + for (Map.Entry _iter894 : struct.success.entrySet()) { - oprot.writeString(_iter878.getKey()); - _iter878.getValue().write(oprot); + oprot.writeString(_iter894.getKey()); + _iter894.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -39911,10 +40221,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter879 : struct.success.entrySet()) + for (Map.Entry _iter895 : struct.success.entrySet()) { - oprot.writeString(_iter879.getKey()); - _iter879.getValue().write(oprot); + oprot.writeString(_iter895.getKey()); + _iter895.getValue().write(oprot); } } } @@ -39929,16 +40239,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map880 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map880.size); - String _key881; - Type _val882; - for (int _i883 = 0; _i883 < _map880.size; ++_i883) + org.apache.thrift.protocol.TMap _map896 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.success = new HashMap(2*_map896.size); + String _key897; + Type _val898; + for (int _i899 = 0; _i899 < _map896.size; ++_i899) { - _key881 = iprot.readString(); - _val882 = new Type(); - _val882.read(iprot); - struct.success.put(_key881, _val882); + _key897 = iprot.readString(); + _val898 = new Type(); + _val898.read(iprot); + struct.success.put(_key897, _val898); } } struct.setSuccessIsSet(true); @@ -40973,14 +41283,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list884 = iprot.readListBegin(); - struct.success = new ArrayList(_list884.size); - FieldSchema _elem885; - for (int _i886 = 0; _i886 < _list884.size; ++_i886) + org.apache.thrift.protocol.TList _list900 = iprot.readListBegin(); + struct.success = new ArrayList(_list900.size); + FieldSchema _elem901; + for (int _i902 = 0; _i902 < _list900.size; ++_i902) { - _elem885 = new FieldSchema(); - _elem885.read(iprot); - struct.success.add(_elem885); + _elem901 = new FieldSchema(); + _elem901.read(iprot); + struct.success.add(_elem901); } iprot.readListEnd(); } @@ -41033,9 +41343,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter887 : struct.success) + for (FieldSchema _iter903 : struct.success) { - _iter887.write(oprot); + _iter903.write(oprot); } oprot.writeListEnd(); } @@ -41090,9 +41400,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter888 : struct.success) + for (FieldSchema _iter904 : struct.success) { - _iter888.write(oprot); + _iter904.write(oprot); } } } @@ -41113,14 +41423,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list889 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list889.size); - FieldSchema _elem890; - for (int _i891 = 0; _i891 < _list889.size; ++_i891) + org.apache.thrift.protocol.TList _list905 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list905.size); + FieldSchema _elem906; + for (int _i907 = 0; _i907 < _list905.size; ++_i907) { - _elem890 = new FieldSchema(); - _elem890.read(iprot); - struct.success.add(_elem890); + _elem906 = new FieldSchema(); + _elem906.read(iprot); + struct.success.add(_elem906); } } struct.setSuccessIsSet(true); @@ -42274,14 +42584,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list892 = iprot.readListBegin(); - struct.success = new ArrayList(_list892.size); - FieldSchema _elem893; - for (int _i894 = 0; _i894 < _list892.size; ++_i894) + org.apache.thrift.protocol.TList _list908 = iprot.readListBegin(); + struct.success = new ArrayList(_list908.size); + FieldSchema _elem909; + for (int _i910 = 0; _i910 < _list908.size; ++_i910) { - _elem893 = new FieldSchema(); - _elem893.read(iprot); - 
struct.success.add(_elem893); + _elem909 = new FieldSchema(); + _elem909.read(iprot); + struct.success.add(_elem909); } iprot.readListEnd(); } @@ -42334,9 +42644,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter895 : struct.success) + for (FieldSchema _iter911 : struct.success) { - _iter895.write(oprot); + _iter911.write(oprot); } oprot.writeListEnd(); } @@ -42391,9 +42701,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter896 : struct.success) + for (FieldSchema _iter912 : struct.success) { - _iter896.write(oprot); + _iter912.write(oprot); } } } @@ -42414,14 +42724,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list897 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list897.size); - FieldSchema _elem898; - for (int _i899 = 0; _i899 < _list897.size; ++_i899) + org.apache.thrift.protocol.TList _list913 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list913.size); + FieldSchema _elem914; + for (int _i915 = 0; _i915 < _list913.size; ++_i915) { - _elem898 = new FieldSchema(); - _elem898.read(iprot); - struct.success.add(_elem898); + _elem914 = new FieldSchema(); + _elem914.read(iprot); + struct.success.add(_elem914); } } struct.setSuccessIsSet(true); @@ -43466,14 +43776,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list900 = iprot.readListBegin(); - struct.success = new ArrayList(_list900.size); - FieldSchema _elem901; - for (int _i902 = 0; _i902 < _list900.size; ++_i902) + org.apache.thrift.protocol.TList _list916 = iprot.readListBegin(); + struct.success = new ArrayList(_list916.size); + FieldSchema _elem917; + for (int _i918 = 0; _i918 < _list916.size; ++_i918) { - _elem901 = new FieldSchema(); - _elem901.read(iprot); - struct.success.add(_elem901); + _elem917 = new FieldSchema(); + _elem917.read(iprot); + struct.success.add(_elem917); } iprot.readListEnd(); } @@ -43526,9 +43836,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter903 : struct.success) + for (FieldSchema _iter919 : struct.success) { - _iter903.write(oprot); + _iter919.write(oprot); } oprot.writeListEnd(); } @@ -43583,9 +43893,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter904 : struct.success) + for (FieldSchema _iter920 : struct.success) { - _iter904.write(oprot); + _iter920.write(oprot); } } } @@ -43606,14 +43916,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if 
(incoming.get(0)) { { - org.apache.thrift.protocol.TList _list905 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list905.size); - FieldSchema _elem906; - for (int _i907 = 0; _i907 < _list905.size; ++_i907) + org.apache.thrift.protocol.TList _list921 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list921.size); + FieldSchema _elem922; + for (int _i923 = 0; _i923 < _list921.size; ++_i923) { - _elem906 = new FieldSchema(); - _elem906.read(iprot); - struct.success.add(_elem906); + _elem922 = new FieldSchema(); + _elem922.read(iprot); + struct.success.add(_elem922); } } struct.setSuccessIsSet(true); @@ -44767,14 +45077,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list908 = iprot.readListBegin(); - struct.success = new ArrayList(_list908.size); - FieldSchema _elem909; - for (int _i910 = 0; _i910 < _list908.size; ++_i910) + org.apache.thrift.protocol.TList _list924 = iprot.readListBegin(); + struct.success = new ArrayList(_list924.size); + FieldSchema _elem925; + for (int _i926 = 0; _i926 < _list924.size; ++_i926) { - _elem909 = new FieldSchema(); - _elem909.read(iprot); - struct.success.add(_elem909); + _elem925 = new FieldSchema(); + _elem925.read(iprot); + struct.success.add(_elem925); } iprot.readListEnd(); } @@ -44827,9 +45137,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter911 : struct.success) + for (FieldSchema _iter927 : struct.success) { - _iter911.write(oprot); + _iter927.write(oprot); } oprot.writeListEnd(); } @@ -44884,9 +45194,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter912 : struct.success) + for (FieldSchema _iter928 : struct.success) { - _iter912.write(oprot); + _iter928.write(oprot); } } } @@ -44907,14 +45217,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list913 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list913.size); - FieldSchema _elem914; - for (int _i915 = 0; _i915 < _list913.size; ++_i915) + org.apache.thrift.protocol.TList _list929 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list929.size); + FieldSchema _elem930; + for (int _i931 = 0; _i931 < _list929.size; ++_i931) { - _elem914 = new FieldSchema(); - _elem914.read(iprot); - struct.success.add(_elem914); + _elem930 = new FieldSchema(); + _elem930.read(iprot); + struct.success.add(_elem930); } } struct.setSuccessIsSet(true); @@ -47138,6 +47448,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en private static final org.apache.thrift.protocol.TField FOREIGN_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeys", org.apache.thrift.protocol.TType.LIST, (short)3); private static 
final org.apache.thrift.protocol.TField UNIQUE_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("uniqueConstraints", org.apache.thrift.protocol.TType.LIST, (short)4); private static final org.apache.thrift.protocol.TField NOT_NULL_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("notNullConstraints", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraints", org.apache.thrift.protocol.TType.LIST, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47150,6 +47461,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en private List foreignKeys; // required private List uniqueConstraints; // required private List notNullConstraints; // required + private List defaultConstraints; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -47157,7 +47469,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_en PRIMARY_KEYS((short)2, "primaryKeys"), FOREIGN_KEYS((short)3, "foreignKeys"), UNIQUE_CONSTRAINTS((short)4, "uniqueConstraints"), - NOT_NULL_CONSTRAINTS((short)5, "notNullConstraints"); + NOT_NULL_CONSTRAINTS((short)5, "notNullConstraints"), + DEFAULT_CONSTRAINTS((short)6, "defaultConstraints"); private static final Map byName = new HashMap(); @@ -47182,6 +47495,8 @@ public static _Fields findByThriftId(int fieldId) { return UNIQUE_CONSTRAINTS; case 5: // NOT_NULL_CONSTRAINTS return NOT_NULL_CONSTRAINTS; + case 6: // DEFAULT_CONSTRAINTS + return DEFAULT_CONSTRAINTS; default: return null; } @@ -47239,6 +47554,9 @@ public String getFieldName() { tmpMap.put(_Fields.NOT_NULL_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("notNullConstraints", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLNotNullConstraint.class)))); + tmpMap.put(_Fields.DEFAULT_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraints", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_table_with_constraints_args.class, metaDataMap); } @@ -47251,7 +47569,8 @@ public create_table_with_constraints_args( List primaryKeys, List foreignKeys, List uniqueConstraints, - List notNullConstraints) + List notNullConstraints, + List defaultConstraints) { this(); this.tbl = tbl; @@ -47259,6 +47578,7 @@ public create_table_with_constraints_args( this.foreignKeys = foreignKeys; this.uniqueConstraints = uniqueConstraints; this.notNullConstraints = notNullConstraints; + this.defaultConstraints = defaultConstraints; } /** @@ -47296,6 +47616,13 @@ public create_table_with_constraints_args(create_table_with_constraints_args oth } this.notNullConstraints = __this__notNullConstraints; } + if (other.isSetDefaultConstraints()) { + List __this__defaultConstraints = new ArrayList(other.defaultConstraints.size()); + 
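In create_table_with_constraints_args the new list is registered end to end under Thrift field id 6: a DEFAULT_CONSTRAINTS((short)6, "defaultConstraints") enum constant, a matching TField descriptor, a metaDataMap entry typed as a list of SQLDefaultConstraint, and, further down, a case 6 arm in the standard-scheme reader. Because that reader's default arm calls TProtocolUtil.skip, code generated before this change tolerates the extra field on the wire. A distilled illustration of that skip pattern (the helper and its name are mine, not generated code):

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TField;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TProtocolUtil;

    final class SkipPatternSketch {
      // Mirrors the generated reader's shape: known ids get a typed read,
      // everything else is skipped, which is what keeps pre-change readers
      // compatible when field 6 starts appearing.
      static void readOneField(TProtocol iprot, TField schemeField) throws TException {
        switch (schemeField.id) {
          // case 6: a regenerated reader decodes the SQLDefaultConstraint list here
          default:
            TProtocolUtil.skip(iprot, schemeField.type); // pre-change readers land here
        }
      }
    }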
for (SQLDefaultConstraint other_element : other.defaultConstraints) { + __this__defaultConstraints.add(new SQLDefaultConstraint(other_element)); + } + this.defaultConstraints = __this__defaultConstraints; + } } public create_table_with_constraints_args deepCopy() { @@ -47309,6 +47636,7 @@ public void clear() { this.foreignKeys = null; this.uniqueConstraints = null; this.notNullConstraints = null; + this.defaultConstraints = null; } public Table getTbl() { @@ -47486,6 +47814,44 @@ public void setNotNullConstraintsIsSet(boolean value) { } } + public int getDefaultConstraintsSize() { + return (this.defaultConstraints == null) ? 0 : this.defaultConstraints.size(); + } + + public java.util.Iterator getDefaultConstraintsIterator() { + return (this.defaultConstraints == null) ? null : this.defaultConstraints.iterator(); + } + + public void addToDefaultConstraints(SQLDefaultConstraint elem) { + if (this.defaultConstraints == null) { + this.defaultConstraints = new ArrayList(); + } + this.defaultConstraints.add(elem); + } + + public List getDefaultConstraints() { + return this.defaultConstraints; + } + + public void setDefaultConstraints(List defaultConstraints) { + this.defaultConstraints = defaultConstraints; + } + + public void unsetDefaultConstraints() { + this.defaultConstraints = null; + } + + /** Returns true if field defaultConstraints is set (has been assigned a value) and false otherwise */ + public boolean isSetDefaultConstraints() { + return this.defaultConstraints != null; + } + + public void setDefaultConstraintsIsSet(boolean value) { + if (!value) { + this.defaultConstraints = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TBL: @@ -47528,6 +47894,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case DEFAULT_CONSTRAINTS: + if (value == null) { + unsetDefaultConstraints(); + } else { + setDefaultConstraints((List)value); + } + break; + } } @@ -47548,6 +47922,9 @@ public Object getFieldValue(_Fields field) { case NOT_NULL_CONSTRAINTS: return getNotNullConstraints(); + case DEFAULT_CONSTRAINTS: + return getDefaultConstraints(); + } throw new IllegalStateException(); } @@ -47569,6 +47946,8 @@ public boolean isSet(_Fields field) { return isSetUniqueConstraints(); case NOT_NULL_CONSTRAINTS: return isSetNotNullConstraints(); + case DEFAULT_CONSTRAINTS: + return isSetDefaultConstraints(); } throw new IllegalStateException(); } @@ -47631,6 +48010,15 @@ public boolean equals(create_table_with_constraints_args that) { return false; } + boolean this_present_defaultConstraints = true && this.isSetDefaultConstraints(); + boolean that_present_defaultConstraints = true && that.isSetDefaultConstraints(); + if (this_present_defaultConstraints || that_present_defaultConstraints) { + if (!(this_present_defaultConstraints && that_present_defaultConstraints)) + return false; + if (!this.defaultConstraints.equals(that.defaultConstraints)) + return false; + } + return true; } @@ -47663,6 +48051,11 @@ public int hashCode() { if (present_notNullConstraints) list.add(notNullConstraints); + boolean present_defaultConstraints = true && (isSetDefaultConstraints()); + list.add(present_defaultConstraints); + if (present_defaultConstraints) + list.add(defaultConstraints); + return list.hashCode(); } @@ -47724,6 +48117,16 @@ public int compareTo(create_table_with_constraints_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetDefaultConstraints()).compareTo(other.isSetDefaultConstraints()); + if (lastComparison != 0) 
{ + return lastComparison; + } + if (isSetDefaultConstraints()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultConstraints, other.defaultConstraints); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -47783,6 +48186,14 @@ public String toString() { sb.append(this.notNullConstraints); } first = false; + if (!first) sb.append(", "); + sb.append("defaultConstraints:"); + if (this.defaultConstraints == null) { + sb.append("null"); + } else { + sb.append(this.defaultConstraints); + } + first = false; sb.append(")"); return sb.toString(); } @@ -47841,14 +48252,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list916 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list916.size); - SQLPrimaryKey _elem917; - for (int _i918 = 0; _i918 < _list916.size; ++_i918) + org.apache.thrift.protocol.TList _list932 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list932.size); + SQLPrimaryKey _elem933; + for (int _i934 = 0; _i934 < _list932.size; ++_i934) { - _elem917 = new SQLPrimaryKey(); - _elem917.read(iprot); - struct.primaryKeys.add(_elem917); + _elem933 = new SQLPrimaryKey(); + _elem933.read(iprot); + struct.primaryKeys.add(_elem933); } iprot.readListEnd(); } @@ -47860,14 +48271,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list919 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list919.size); - SQLForeignKey _elem920; - for (int _i921 = 0; _i921 < _list919.size; ++_i921) + org.apache.thrift.protocol.TList _list935 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list935.size); + SQLForeignKey _elem936; + for (int _i937 = 0; _i937 < _list935.size; ++_i937) { - _elem920 = new SQLForeignKey(); - _elem920.read(iprot); - struct.foreignKeys.add(_elem920); + _elem936 = new SQLForeignKey(); + _elem936.read(iprot); + struct.foreignKeys.add(_elem936); } iprot.readListEnd(); } @@ -47879,14 +48290,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list922 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list922.size); - SQLUniqueConstraint _elem923; - for (int _i924 = 0; _i924 < _list922.size; ++_i924) + org.apache.thrift.protocol.TList _list938 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list938.size); + SQLUniqueConstraint _elem939; + for (int _i940 = 0; _i940 < _list938.size; ++_i940) { - _elem923 = new SQLUniqueConstraint(); - _elem923.read(iprot); - struct.uniqueConstraints.add(_elem923); + _elem939 = new SQLUniqueConstraint(); + _elem939.read(iprot); + struct.uniqueConstraints.add(_elem939); } iprot.readListEnd(); } @@ -47898,14 +48309,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list925 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list925.size); - SQLNotNullConstraint _elem926; - for (int _i927 = 0; _i927 < _list925.size; ++_i927) + 
org.apache.thrift.protocol.TList _list941 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list941.size); + SQLNotNullConstraint _elem942; + for (int _i943 = 0; _i943 < _list941.size; ++_i943) { - _elem926 = new SQLNotNullConstraint(); - _elem926.read(iprot); - struct.notNullConstraints.add(_elem926); + _elem942 = new SQLNotNullConstraint(); + _elem942.read(iprot); + struct.notNullConstraints.add(_elem942); } iprot.readListEnd(); } @@ -47914,6 +48325,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // DEFAULT_CONSTRAINTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list944.size); + SQLDefaultConstraint _elem945; + for (int _i946 = 0; _i946 < _list944.size; ++_i946) + { + _elem945 = new SQLDefaultConstraint(); + _elem945.read(iprot); + struct.defaultConstraints.add(_elem945); + } + iprot.readListEnd(); + } + struct.setDefaultConstraintsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -47936,9 +48366,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter928 : struct.primaryKeys) + for (SQLPrimaryKey _iter947 : struct.primaryKeys) { - _iter928.write(oprot); + _iter947.write(oprot); } oprot.writeListEnd(); } @@ -47948,9 +48378,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter929 : struct.foreignKeys) + for (SQLForeignKey _iter948 : struct.foreignKeys) { - _iter929.write(oprot); + _iter948.write(oprot); } oprot.writeListEnd(); } @@ -47960,9 +48390,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter930 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter949 : struct.uniqueConstraints) { - _iter930.write(oprot); + _iter949.write(oprot); } oprot.writeListEnd(); } @@ -47972,9 +48402,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter931 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter950 : struct.notNullConstraints) + { + _iter950.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.defaultConstraints != null) { + oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); + for (SQLDefaultConstraint 
_iter951 : struct.defaultConstraints) { - _iter931.write(oprot); + _iter951.write(oprot); } oprot.writeListEnd(); } @@ -48013,43 +48455,55 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetNotNullConstraints()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetDefaultConstraints()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetTbl()) { struct.tbl.write(oprot); } if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter932 : struct.primaryKeys) + for (SQLPrimaryKey _iter952 : struct.primaryKeys) { - _iter932.write(oprot); + _iter952.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter933 : struct.foreignKeys) + for (SQLForeignKey _iter953 : struct.foreignKeys) { - _iter933.write(oprot); + _iter953.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter934 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter954 : struct.uniqueConstraints) { - _iter934.write(oprot); + _iter954.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter935 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter955 : struct.notNullConstraints) + { + _iter955.write(oprot); + } + } + } + if (struct.isSetDefaultConstraints()) { + { + oprot.writeI32(struct.defaultConstraints.size()); + for (SQLDefaultConstraint _iter956 : struct.defaultConstraints) { - _iter935.write(oprot); + _iter956.write(oprot); } } } @@ -48058,7 +48512,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c @Override public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_constraints_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.tbl = new Table(); struct.tbl.read(iprot); @@ -48066,60 +48520,74 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list936 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list936.size); - SQLPrimaryKey _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list957.size); + SQLPrimaryKey _elem958; + for (int _i959 = 0; _i959 < _list957.size; ++_i959) { - _elem937 = new SQLPrimaryKey(); - _elem937.read(iprot); - struct.primaryKeys.add(_elem937); + _elem958 = new SQLPrimaryKey(); + _elem958.read(iprot); + struct.primaryKeys.add(_elem958); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list939 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list939.size); - SQLForeignKey _elem940; - for (int _i941 = 0; _i941 < _list939.size; ++_i941) + org.apache.thrift.protocol.TList _list960 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new 
ArrayList(_list960.size); + SQLForeignKey _elem961; + for (int _i962 = 0; _i962 < _list960.size; ++_i962) { - _elem940 = new SQLForeignKey(); - _elem940.read(iprot); - struct.foreignKeys.add(_elem940); + _elem961 = new SQLForeignKey(); + _elem961.read(iprot); + struct.foreignKeys.add(_elem961); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list942 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list942.size); - SQLUniqueConstraint _elem943; - for (int _i944 = 0; _i944 < _list942.size; ++_i944) + org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list963.size); + SQLUniqueConstraint _elem964; + for (int _i965 = 0; _i965 < _list963.size; ++_i965) { - _elem943 = new SQLUniqueConstraint(); - _elem943.read(iprot); - struct.uniqueConstraints.add(_elem943); + _elem964 = new SQLUniqueConstraint(); + _elem964.read(iprot); + struct.uniqueConstraints.add(_elem964); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list945 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list945.size); - SQLNotNullConstraint _elem946; - for (int _i947 = 0; _i947 < _list945.size; ++_i947) + org.apache.thrift.protocol.TList _list966 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list966.size); + SQLNotNullConstraint _elem967; + for (int _i968 = 0; _i968 < _list966.size; ++_i968) { - _elem946 = new SQLNotNullConstraint(); - _elem946.read(iprot); - struct.notNullConstraints.add(_elem946); + _elem967 = new SQLNotNullConstraint(); + _elem967.read(iprot); + struct.notNullConstraints.add(_elem967); } } struct.setNotNullConstraintsIsSet(true); } + if (incoming.get(5)) { + { + org.apache.thrift.protocol.TList _list969 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list969.size); + SQLDefaultConstraint _elem970; + for (int _i971 = 0; _i971 < _list969.size; ++_i971) + { + _elem970 = new SQLDefaultConstraint(); + _elem970.read(iprot); + struct.defaultConstraints.add(_elem970); + } + } + struct.setDefaultConstraintsIsSet(true); + } } } @@ -52948,28 +53416,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_not_null_constra } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_default_constraint_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_default_constraint_args"); - private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", 
org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_table_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_table_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_default_constraint_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_default_constraint_argsTupleSchemeFactory()); } - private String dbname; // required - private String name; // required - private boolean deleteData; // required + private AddDefaultConstraintRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DBNAME((short)1, "dbname"), - NAME((short)2, "name"), - DELETE_DATA((short)3, "deleteData"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -52984,12 +53446,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_not_null_constra */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DBNAME - return DBNAME; - case 2: // NAME - return NAME; - case 3: // DELETE_DATA - return DELETE_DATA; + case 1: // REQ + return REQ; default: return null; } @@ -53030,153 +53488,73 @@ public String getFieldName() { } // isset id assignments - private static final int __DELETEDATA_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDefaultConstraintRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_default_constraint_args.class, metaDataMap); } - public drop_table_args() { + public 
add_default_constraint_args() { } - public drop_table_args( - String dbname, - String name, - boolean deleteData) + public add_default_constraint_args( + AddDefaultConstraintRequest req) { this(); - this.dbname = dbname; - this.name = name; - this.deleteData = deleteData; - setDeleteDataIsSet(true); + this.req = req; } /** * Performs a deep copy on other. */ - public drop_table_args(drop_table_args other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetDbname()) { - this.dbname = other.dbname; - } - if (other.isSetName()) { - this.name = other.name; + public add_default_constraint_args(add_default_constraint_args other) { + if (other.isSetReq()) { + this.req = new AddDefaultConstraintRequest(other.req); } - this.deleteData = other.deleteData; } - public drop_table_args deepCopy() { - return new drop_table_args(this); + public add_default_constraint_args deepCopy() { + return new add_default_constraint_args(this); } @Override public void clear() { - this.dbname = null; - this.name = null; - setDeleteDataIsSet(false); - this.deleteData = false; - } - - public String getDbname() { - return this.dbname; - } - - public void setDbname(String dbname) { - this.dbname = dbname; - } - - public void unsetDbname() { - this.dbname = null; - } - - /** Returns true if field dbname is set (has been assigned a value) and false otherwise */ - public boolean isSetDbname() { - return this.dbname != null; - } - - public void setDbnameIsSet(boolean value) { - if (!value) { - this.dbname = null; - } + this.req = null; } - public String getName() { - return this.name; + public AddDefaultConstraintRequest getReq() { + return this.req; } - public void setName(String name) { - this.name = name; + public void setReq(AddDefaultConstraintRequest req) { + this.req = req; } - public void unsetName() { - this.name = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field name is set (has been assigned a value) and false otherwise */ - public boolean isSetName() { - return this.name != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setNameIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.name = null; + this.req = null; } } - public boolean isDeleteData() { - return this.deleteData; - } - - public void setDeleteData(boolean deleteData) { - this.deleteData = deleteData; - setDeleteDataIsSet(true); - } - - public void unsetDeleteData() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID); - } - - /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ - public boolean isSetDeleteData() { - return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID); - } - - public void setDeleteDataIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { - case DBNAME: - if (value == null) { - unsetDbname(); - } else { - setDbname((String)value); - } - break; - - case NAME: - if (value == null) { - unsetName(); - } else { - setName((String)value); - } - break; - - case DELETE_DATA: + case REQ: if (value == null) { - unsetDeleteData(); + unsetReq(); } else { - setDeleteData((Boolean)value); + setReq((AddDefaultConstraintRequest)value); } break; @@ -53185,14 +53563,8 @@ public void setFieldValue(_Fields field, Object value) { 
public Object getFieldValue(_Fields field) { switch (field) { - case DBNAME: - return getDbname(); - - case NAME: - return getName(); - - case DELETE_DATA: - return isDeleteData(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -53205,12 +53577,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case DBNAME: - return isSetDbname(); - case NAME: - return isSetName(); - case DELETE_DATA: - return isSetDeleteData(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -53219,39 +53587,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_table_args) - return this.equals((drop_table_args)that); + if (that instanceof add_default_constraint_args) + return this.equals((add_default_constraint_args)that); return false; } - public boolean equals(drop_table_args that) { + public boolean equals(add_default_constraint_args that) { if (that == null) return false; - boolean this_present_dbname = true && this.isSetDbname(); - boolean that_present_dbname = true && that.isSetDbname(); - if (this_present_dbname || that_present_dbname) { - if (!(this_present_dbname && that_present_dbname)) - return false; - if (!this.dbname.equals(that.dbname)) - return false; - } - - boolean this_present_name = true && this.isSetName(); - boolean that_present_name = true && that.isSetName(); - if (this_present_name || that_present_name) { - if (!(this_present_name && that_present_name)) - return false; - if (!this.name.equals(that.name)) - return false; - } - - boolean this_present_deleteData = true; - boolean that_present_deleteData = true; - if (this_present_deleteData || that_present_deleteData) { - if (!(this_present_deleteData && that_present_deleteData)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (this.deleteData != that.deleteData) + if (!this.req.equals(that.req)) return false; } @@ -53262,58 +53612,28 @@ public boolean equals(drop_table_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_dbname = true && (isSetDbname()); - list.add(present_dbname); - if (present_dbname) - list.add(dbname); - - boolean present_name = true && (isSetName()); - list.add(present_name); - if (present_name) - list.add(name); - - boolean present_deleteData = true; - list.add(present_deleteData); - if (present_deleteData) - list.add(deleteData); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(drop_table_args other) { + public int compareTo(add_default_constraint_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDbname()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); - if (lastComparison != 0) { 
- return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetDeleteData()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -53335,28 +53655,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_table_args("); + StringBuilder sb = new StringBuilder("add_default_constraint_args("); boolean first = true; - sb.append("dbname:"); - if (this.dbname == null) { - sb.append("null"); - } else { - sb.append(this.dbname); - } - first = false; - if (!first) sb.append(", "); - sb.append("name:"); - if (this.name == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.name); + sb.append(this.req); } first = false; - if (!first) sb.append(", "); - sb.append("deleteData:"); - sb.append(this.deleteData); - first = false; sb.append(")"); return sb.toString(); } @@ -53364,6 +53672,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -53376,23 +53687,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
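Aside: the writeObject/readObject pair in these generated structs bridges java.io serialization onto Thrift's TCompactProtocol over a TIOStreamTransport; the `__isset_bitfield = 0;` reset is dropped in this hunk because add_default_constraint_args carries only a struct field (req) and no primitives. A minimal sketch of that bridge, assuming only libthrift on the classpath; the class name and payload string are illustrative, not part of this patch:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

public class CompactBridgeSketch {
  public static void main(String[] args) throws TException {
    // write side: the generated writeObject() wraps the object stream in a
    // TIOStreamTransport and emits the struct through the compact protocol.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    new TCompactProtocol(new TIOStreamTransport(bos)).writeString("req-payload");

    // read side: readObject() does the inverse; structs with primitive fields
    // must also reset __isset_bitfield first, since no constructor runs here.
    TCompactProtocol in = new TCompactProtocol(
        new TIOStreamTransport(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(in.readString()); // prints: req-payload
  }
}

The same idiom appears in every generated struct in this file; only structs with primitive members keep the bitfield reset, because Java deserialization never invokes a constructor.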
- __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class drop_table_argsStandardSchemeFactory implements SchemeFactory { - public drop_table_argsStandardScheme getScheme() { - return new drop_table_argsStandardScheme(); + private static class add_default_constraint_argsStandardSchemeFactory implements SchemeFactory { + public add_default_constraint_argsStandardScheme getScheme() { + return new add_default_constraint_argsStandardScheme(); } } - private static class drop_table_argsStandardScheme extends StandardScheme { + private static class add_default_constraint_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_default_constraint_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -53402,26 +53711,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_args str break; } switch (schemeField.id) { - case 1: // DBNAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.dbname = iprot.readString(); - struct.setDbnameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.name = iprot.readString(); - struct.setNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // DELETE_DATA - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.deleteData = iprot.readBool(); - struct.setDeleteDataIsSet(true); + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new AddDefaultConstraintRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -53435,102 +53729,75 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_default_constraint_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.dbname != null) { - oprot.writeFieldBegin(DBNAME_FIELD_DESC); - oprot.writeString(struct.dbname); - oprot.writeFieldEnd(); - } - if (struct.name != null) { - oprot.writeFieldBegin(NAME_FIELD_DESC); - oprot.writeString(struct.name); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); - oprot.writeBool(struct.deleteData); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class drop_table_argsTupleSchemeFactory implements SchemeFactory { - public drop_table_argsTupleScheme getScheme() { - return new drop_table_argsTupleScheme(); + private static class add_default_constraint_argsTupleSchemeFactory implements SchemeFactory { + public add_default_constraint_argsTupleScheme getScheme() { + 
return new add_default_constraint_argsTupleScheme(); } } - private static class drop_table_argsTupleScheme extends TupleScheme { + private static class add_default_constraint_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_default_constraint_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDbname()) { + if (struct.isSetReq()) { optionals.set(0); } - if (struct.isSetName()) { - optionals.set(1); - } - if (struct.isSetDeleteData()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetDbname()) { - oprot.writeString(struct.dbname); - } - if (struct.isSetName()) { - oprot.writeString(struct.name); - } - if (struct.isSetDeleteData()) { - oprot.writeBool(struct.deleteData); + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_default_constraint_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.dbname = iprot.readString(); - struct.setDbnameIsSet(true); - } - if (incoming.get(1)) { - struct.name = iprot.readString(); - struct.setNameIsSet(true); - } - if (incoming.get(2)) { - struct.deleteData = iprot.readBool(); - struct.setDeleteDataIsSet(true); + struct.req = new AddDefaultConstraintRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_default_constraint_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_default_constraint_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_table_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_table_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_default_constraint_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new 
add_default_constraint_resultTupleSchemeFactory()); } private NoSuchObjectException o1; // required - private MetaException o3; // required + private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { O1((short)1, "o1"), - O3((short)2, "o3"); + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -53547,8 +53814,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // O1 return O1; - case 2: // O3 - return O3; + case 2: // O2 + return O2; default: return null; } @@ -53594,44 +53861,44 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_default_constraint_result.class, metaDataMap); } - public drop_table_result() { + public add_default_constraint_result() { } - public drop_table_result( + public add_default_constraint_result( NoSuchObjectException o1, - MetaException o3) + MetaException o2) { this(); this.o1 = o1; - this.o3 = o3; + this.o2 = o2; } /** * Performs a deep copy on other. 
*/ - public drop_table_result(drop_table_result other) { + public add_default_constraint_result(add_default_constraint_result other) { if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } - if (other.isSetO3()) { - this.o3 = new MetaException(other.o3); + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); } } - public drop_table_result deepCopy() { - return new drop_table_result(this); + public add_default_constraint_result deepCopy() { + return new add_default_constraint_result(this); } @Override public void clear() { this.o1 = null; - this.o3 = null; + this.o2 = null; } public NoSuchObjectException getO1() { @@ -53657,26 +53924,26 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO3() { - return this.o3; + public MetaException getO2() { + return this.o2; } - public void setO3(MetaException o3) { - this.o3 = o3; + public void setO2(MetaException o2) { + this.o2 = o2; } - public void unsetO3() { - this.o3 = null; + public void unsetO2() { + this.o2 = null; } - /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ - public boolean isSetO3() { - return this.o3 != null; + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; } - public void setO3IsSet(boolean value) { + public void setO2IsSet(boolean value) { if (!value) { - this.o3 = null; + this.o2 = null; } } @@ -53690,11 +53957,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case O3: + case O2: if (value == null) { - unsetO3(); + unsetO2(); } else { - setO3((MetaException)value); + setO2((MetaException)value); } break; @@ -53706,8 +53973,8 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); - case O3: - return getO3(); + case O2: + return getO2(); } throw new IllegalStateException(); @@ -53722,8 +53989,8 @@ public boolean isSet(_Fields field) { switch (field) { case O1: return isSetO1(); - case O3: - return isSetO3(); + case O2: + return isSetO2(); } throw new IllegalStateException(); } @@ -53732,12 +53999,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_table_result) - return this.equals((drop_table_result)that); + if (that instanceof add_default_constraint_result) + return this.equals((add_default_constraint_result)that); return false; } - public boolean equals(drop_table_result that) { + public boolean equals(add_default_constraint_result that) { if (that == null) return false; @@ -53750,12 +54017,12 @@ public boolean equals(drop_table_result that) { return false; } - boolean this_present_o3 = true && this.isSetO3(); - boolean that_present_o3 = true && that.isSetO3(); - if (this_present_o3 || that_present_o3) { - if (!(this_present_o3 && that_present_o3)) + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) return false; - if (!this.o3.equals(that.o3)) + if (!this.o2.equals(that.o2)) return false; } @@ -53771,16 +54038,16 @@ public int hashCode() { if (present_o1) list.add(o1); - boolean present_o3 = true && (isSetO3()); - list.add(present_o3); - if (present_o3) - list.add(o3); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); return list.hashCode(); } @Override - public int compareTo(drop_table_result other) { + public int 
compareTo(add_default_constraint_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -53797,12 +54064,12 @@ public int compareTo(drop_table_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); if (lastComparison != 0) { return lastComparison; } - if (isSetO3()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); if (lastComparison != 0) { return lastComparison; } @@ -53824,7 +54091,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_table_result("); + StringBuilder sb = new StringBuilder("add_default_constraint_result("); boolean first = true; sb.append("o1:"); @@ -53835,11 +54102,11 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { + sb.append("o2:"); + if (this.o2 == null) { sb.append("null"); } else { - sb.append(this.o3); + sb.append(this.o2); } first = false; sb.append(")"); @@ -53867,15 +54134,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_table_resultStandardSchemeFactory implements SchemeFactory { - public drop_table_resultStandardScheme getScheme() { - return new drop_table_resultStandardScheme(); + private static class add_default_constraint_resultStandardSchemeFactory implements SchemeFactory { + public add_default_constraint_resultStandardScheme getScheme() { + return new add_default_constraint_resultStandardScheme(); } } - private static class drop_table_resultStandardScheme extends StandardScheme { + private static class add_default_constraint_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_default_constraint_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -53894,11 +54161,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_result s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // O3 + case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new MetaException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -53912,7 +54179,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_default_constraint_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -53921,9 +54188,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_result struct.o1.write(oprot); oprot.writeFieldEnd(); } - if (struct.o3 != null) { 
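Aside: every *TupleScheme read/write pair in this section follows one pattern: the writer emits a BitSet with one presence bit per candidate field (which is why defaultConstraints claims the new bit 5 earlier in this section), then serializes only the fields whose bits are set, and the reader mirrors it. A self-contained sketch of that protocol-level pattern, assuming libthrift; the class name, payload, and two-field count are illustrative:

import java.util.BitSet;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class TupleBitSetSketch {
  public static void main(String[] args) throws TException {
    TMemoryBuffer buf = new TMemoryBuffer(128);
    TTupleProtocol oprot = new TTupleProtocol(buf);

    // Writer: one presence bit per candidate field, then only the set fields.
    BitSet optionals = new BitSet();
    optionals.set(0);                 // e.g. isSetO1() == true, isSetO2() == false
    oprot.writeBitSet(optionals, 2);  // two candidate fields, as in the *_result structs
    oprot.writeString("only field 0 follows");

    // Reader mirrors it: read the bit set, then conditionally read each field.
    TTupleProtocol iprot = new TTupleProtocol(buf);
    BitSet incoming = iprot.readBitSet(2);
    if (incoming.get(0)) {
      System.out.println(iprot.readString());
    }
  }
}

Because absent fields cost only their bit, this tuple encoding is denser than the standard scheme's field-header framing, at the price of both sides agreeing exactly on field order and count.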
- oprot.writeFieldBegin(O3_FIELD_DESC); - struct.o3.write(oprot); + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -53932,35 +54199,35 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_result } - private static class drop_table_resultTupleSchemeFactory implements SchemeFactory { - public drop_table_resultTupleScheme getScheme() { - return new drop_table_resultTupleScheme(); + private static class add_default_constraint_resultTupleSchemeFactory implements SchemeFactory { + public add_default_constraint_resultTupleScheme getScheme() { + return new add_default_constraint_resultTupleScheme(); } } - private static class drop_table_resultTupleScheme extends TupleScheme { + private static class add_default_constraint_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_default_constraint_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { optionals.set(0); } - if (struct.isSetO3()) { + if (struct.isSetO2()) { optionals.set(1); } oprot.writeBitSet(optionals, 2); if (struct.isSetO1()) { struct.o1.write(oprot); } - if (struct.isSetO3()) { - struct.o3.write(oprot); + if (struct.isSetO2()) { + struct.o2.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_default_constraint_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { @@ -53969,40 +54236,37 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_result st struct.setO1IsSet(true); } if (incoming.get(1)) { - struct.o3 = new MetaException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_with_environment_context_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_args"); private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", 
org.apache.thrift.protocol.TType.BOOL, (short)3); - private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_table_with_environment_context_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_table_with_environment_context_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_table_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_table_argsTupleSchemeFactory()); } private String dbname; // required private String name; // required private boolean deleteData; // required - private EnvironmentContext environment_context; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), NAME((short)2, "name"), - DELETE_DATA((short)3, "deleteData"), - ENVIRONMENT_CONTEXT((short)4, "environment_context"); + DELETE_DATA((short)3, "deleteData"); private static final Map byName = new HashMap(); @@ -54023,8 +54287,6 @@ public static _Fields findByThriftId(int fieldId) { return NAME; case 3: // DELETE_DATA return DELETE_DATA; - case 4: // ENVIRONMENT_CONTEXT - return ENVIRONMENT_CONTEXT; default: return null; } @@ -54076,33 +54338,29 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_with_environment_context_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_args.class, metaDataMap); } - public drop_table_with_environment_context_args() { + public drop_table_args() { } - public drop_table_with_environment_context_args( + public drop_table_args( String dbname, String name, - boolean deleteData, - EnvironmentContext environment_context) + boolean deleteData) { this(); this.dbname = dbname; this.name = name; this.deleteData = deleteData; setDeleteDataIsSet(true); - this.environment_context = environment_context; } /** * Performs a deep copy on other. 
*/ - public drop_table_with_environment_context_args(drop_table_with_environment_context_args other) { + public drop_table_args(drop_table_args other) { __isset_bitfield = other.__isset_bitfield; if (other.isSetDbname()) { this.dbname = other.dbname; @@ -54111,13 +54369,10 @@ public drop_table_with_environment_context_args(drop_table_with_environment_cont this.name = other.name; } this.deleteData = other.deleteData; - if (other.isSetEnvironment_context()) { - this.environment_context = new EnvironmentContext(other.environment_context); - } } - public drop_table_with_environment_context_args deepCopy() { - return new drop_table_with_environment_context_args(this); + public drop_table_args deepCopy() { + return new drop_table_args(this); } @Override @@ -54126,7 +54381,6 @@ public void clear() { this.name = null; setDeleteDataIsSet(false); this.deleteData = false; - this.environment_context = null; } public String getDbname() { @@ -54197,29 +54451,6 @@ public void setDeleteDataIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); } - public EnvironmentContext getEnvironment_context() { - return this.environment_context; - } - - public void setEnvironment_context(EnvironmentContext environment_context) { - this.environment_context = environment_context; - } - - public void unsetEnvironment_context() { - this.environment_context = null; - } - - /** Returns true if field environment_context is set (has been assigned a value) and false otherwise */ - public boolean isSetEnvironment_context() { - return this.environment_context != null; - } - - public void setEnvironment_contextIsSet(boolean value) { - if (!value) { - this.environment_context = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -54246,14 +54477,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case ENVIRONMENT_CONTEXT: - if (value == null) { - unsetEnvironment_context(); - } else { - setEnvironment_context((EnvironmentContext)value); - } - break; - } } @@ -54268,9 +54491,6 @@ public Object getFieldValue(_Fields field) { case DELETE_DATA: return isDeleteData(); - case ENVIRONMENT_CONTEXT: - return getEnvironment_context(); - } throw new IllegalStateException(); } @@ -54288,8 +54508,6 @@ public boolean isSet(_Fields field) { return isSetName(); case DELETE_DATA: return isSetDeleteData(); - case ENVIRONMENT_CONTEXT: - return isSetEnvironment_context(); } throw new IllegalStateException(); } @@ -54298,12 +54516,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_table_with_environment_context_args) - return this.equals((drop_table_with_environment_context_args)that); + if (that instanceof drop_table_args) + return this.equals((drop_table_args)that); return false; } - public boolean equals(drop_table_with_environment_context_args that) { + public boolean equals(drop_table_args that) { if (that == null) return false; @@ -54334,15 +54552,6 @@ public boolean equals(drop_table_with_environment_context_args that) { return false; } - boolean this_present_environment_context = true && this.isSetEnvironment_context(); - boolean that_present_environment_context = true && that.isSetEnvironment_context(); - if (this_present_environment_context || that_present_environment_context) { - if (!(this_present_environment_context && that_present_environment_context)) - return false; - if 
(!this.environment_context.equals(that.environment_context)) - return false; - } - return true; } @@ -54365,16 +54574,11 @@ public int hashCode() { if (present_deleteData) list.add(deleteData); - boolean present_environment_context = true && (isSetEnvironment_context()); - list.add(present_environment_context); - if (present_environment_context) - list.add(environment_context); - return list.hashCode(); } @Override - public int compareTo(drop_table_with_environment_context_args other) { + public int compareTo(drop_table_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -54411,16 +54615,6 @@ public int compareTo(drop_table_with_environment_context_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetEnvironment_context()).compareTo(other.isSetEnvironment_context()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetEnvironment_context()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environment_context, other.environment_context); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -54438,7 +54632,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_table_with_environment_context_args("); + StringBuilder sb = new StringBuilder("drop_table_args("); boolean first = true; sb.append("dbname:"); @@ -54460,14 +54654,6 @@ public String toString() { sb.append("deleteData:"); sb.append(this.deleteData); first = false; - if (!first) sb.append(", "); - sb.append("environment_context:"); - if (this.environment_context == null) { - sb.append("null"); - } else { - sb.append(this.environment_context); - } - first = false; sb.append(")"); return sb.toString(); } @@ -54475,9 +54661,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (environment_context != null) { - environment_context.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -54498,15 +54681,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_table_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { - public drop_table_with_environment_context_argsStandardScheme getScheme() { - return new drop_table_with_environment_context_argsStandardScheme(); + private static class drop_table_argsStandardSchemeFactory implements SchemeFactory { + public drop_table_argsStandardScheme getScheme() { + return new drop_table_argsStandardScheme(); } } - private static class drop_table_with_environment_context_argsStandardScheme extends StandardScheme { + private static class drop_table_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -54540,15 +54723,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_env org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // ENVIRONMENT_CONTEXT - if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { - struct.environment_context = new EnvironmentContext(); - struct.environment_context.read(iprot); - struct.setEnvironment_contextIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -54558,7 +54732,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_env struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -54575,27 +54749,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_en oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); oprot.writeBool(struct.deleteData); oprot.writeFieldEnd(); - if (struct.environment_context != null) { - oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); - struct.environment_context.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class drop_table_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { - public drop_table_with_environment_context_argsTupleScheme getScheme() { - return new drop_table_with_environment_context_argsTupleScheme(); + private static class drop_table_argsTupleSchemeFactory implements SchemeFactory { + public drop_table_argsTupleScheme getScheme() { + return new drop_table_argsTupleScheme(); } } - private static class drop_table_with_environment_context_argsTupleScheme extends TupleScheme { + private static class drop_table_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDbname()) { @@ -54607,10 +54776,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_env if (struct.isSetDeleteData()) { optionals.set(2); } - if (struct.isSetEnvironment_context()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); + oprot.writeBitSet(optionals, 3); if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -54620,15 +54786,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_env if (struct.isSetDeleteData()) { oprot.writeBool(struct.deleteData); } - if (struct.isSetEnvironment_context()) { - struct.environment_context.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -54641,26 +54804,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_envi struct.deleteData = iprot.readBool(); 
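Aside: unlike the nullable object fields, the primitive deleteData field cannot signal "unset" by being null, so the generated code tracks its presence in a one-byte __isset_bitfield through EncodingUtils, as the drop_table_args hunks here restore. A compact sketch of that mechanism; the class name is illustrative, while EncodingUtils.setBit/testBit are the real libthrift helpers used by this file:

import org.apache.thrift.EncodingUtils;

public class IssetBitfieldSketch {
  private static final int DELETEDATA_ISSET_ID = 0; // bit index, as in the patch
  private byte issetBitfield = 0;
  private boolean deleteData;

  void setDeleteData(boolean v) {
    deleteData = v;
    issetBitfield = EncodingUtils.setBit(issetBitfield, DELETEDATA_ISSET_ID, true);
  }

  boolean isSetDeleteData() {
    return EncodingUtils.testBit(issetBitfield, DELETEDATA_ISSET_ID);
  }

  public static void main(String[] args) {
    IssetBitfieldSketch s = new IssetBitfieldSketch();
    System.out.println(s.isSetDeleteData()); // false: never assigned
    s.setDeleteData(false);
    System.out.println(s.isSetDeleteData()); // true: assigned, even though the value is false
  }
}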
struct.setDeleteDataIsSet(true); } - if (incoming.get(3)) { - struct.environment_context = new EnvironmentContext(); - struct.environment_context.read(iprot); - struct.setEnvironment_contextIsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_with_environment_context_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_table_with_environment_context_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_table_with_environment_context_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_table_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_table_resultTupleSchemeFactory()); } private NoSuchObjectException o1; // required @@ -54736,13 +54894,13 @@ public String getFieldName() { tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_with_environment_context_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_result.class, metaDataMap); } - public drop_table_with_environment_context_result() { + public drop_table_result() { } - public drop_table_with_environment_context_result( + public drop_table_result( NoSuchObjectException o1, MetaException o3) { @@ -54754,7 +54912,7 @@ public drop_table_with_environment_context_result( /** * Performs a deep copy on other. 
*/ - public drop_table_with_environment_context_result(drop_table_with_environment_context_result other) { + public drop_table_result(drop_table_result other) { if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } @@ -54763,8 +54921,8 @@ public drop_table_with_environment_context_result(drop_table_with_environment_co } } - public drop_table_with_environment_context_result deepCopy() { - return new drop_table_with_environment_context_result(this); + public drop_table_result deepCopy() { + return new drop_table_result(this); } @Override @@ -54871,12 +55029,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_table_with_environment_context_result) - return this.equals((drop_table_with_environment_context_result)that); + if (that instanceof drop_table_result) + return this.equals((drop_table_result)that); return false; } - public boolean equals(drop_table_with_environment_context_result that) { + public boolean equals(drop_table_result that) { if (that == null) return false; @@ -54919,7 +55077,7 @@ public int hashCode() { } @Override - public int compareTo(drop_table_with_environment_context_result other) { + public int compareTo(drop_table_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -54963,7 +55121,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_table_with_environment_context_result("); + StringBuilder sb = new StringBuilder("drop_table_result("); boolean first = true; sb.append("o1:"); @@ -55006,15 +55164,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { - public drop_table_with_environment_context_resultStandardScheme getScheme() { - return new drop_table_with_environment_context_resultStandardScheme(); + private static class drop_table_resultStandardSchemeFactory implements SchemeFactory { + public drop_table_resultStandardScheme getScheme() { + return new drop_table_resultStandardScheme(); } } - private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme { + private static class drop_table_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -55051,7 +55209,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_env struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -55071,16 +55229,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_en } - private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { - public 
drop_table_with_environment_context_resultTupleScheme getScheme() { - return new drop_table_with_environment_context_resultTupleScheme(); + private static class drop_table_resultTupleSchemeFactory implements SchemeFactory { + public drop_table_resultTupleScheme getScheme() { + return new drop_table_resultTupleScheme(); } } - private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme { + private static class drop_table_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -55099,7 +55257,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_env } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { @@ -55117,28 +55275,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_envi } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_with_environment_context_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_table_with_environment_context_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environment_context", 
org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new truncate_table_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new truncate_table_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_table_with_environment_context_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_table_with_environment_context_argsTupleSchemeFactory()); } - private String dbName; // required - private String tableName; // required - private List partNames; // required + private String dbname; // required + private String name; // required + private boolean deleteData; // required + private EnvironmentContext environment_context; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "dbName"), - TABLE_NAME((short)2, "tableName"), - PART_NAMES((short)3, "partNames"); + DBNAME((short)1, "dbname"), + NAME((short)2, "name"), + DELETE_DATA((short)3, "deleteData"), + ENVIRONMENT_CONTEXT((short)4, "environment_context"); private static final Map byName = new HashMap(); @@ -55153,12 +55314,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_envi */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TABLE_NAME - return TABLE_NAME; - case 3: // PART_NAMES - return PART_NAMES; + case 1: // DBNAME + return DBNAME; + case 2: // NAME + return NAME; + case 3: // DELETE_DATA + return DELETE_DATA; + case 4: // ENVIRONMENT_CONTEXT + return ENVIRONMENT_CONTEXT; default: return null; } @@ -55199,168 +55362,192 @@ public String getFieldName() { } // isset id assignments + private static final int __DELETEDATA_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, 
new org.apache.thrift.meta_data.FieldMetaData("environment_context", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_with_environment_context_args.class, metaDataMap); } - public truncate_table_args() { + public drop_table_with_environment_context_args() { } - public truncate_table_args( - String dbName, - String tableName, - List partNames) + public drop_table_with_environment_context_args( + String dbname, + String name, + boolean deleteData, + EnvironmentContext environment_context) { this(); - this.dbName = dbName; - this.tableName = tableName; - this.partNames = partNames; + this.dbname = dbname; + this.name = name; + this.deleteData = deleteData; + setDeleteDataIsSet(true); + this.environment_context = environment_context; } /** * Performs a deep copy on other. */ - public truncate_table_args(truncate_table_args other) { - if (other.isSetDbName()) { - this.dbName = other.dbName; + public drop_table_with_environment_context_args(drop_table_with_environment_context_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbname()) { + this.dbname = other.dbname; } - if (other.isSetTableName()) { - this.tableName = other.tableName; + if (other.isSetName()) { + this.name = other.name; } - if (other.isSetPartNames()) { - List __this__partNames = new ArrayList(other.partNames); - this.partNames = __this__partNames; + this.deleteData = other.deleteData; + if (other.isSetEnvironment_context()) { + this.environment_context = new EnvironmentContext(other.environment_context); } } - public truncate_table_args deepCopy() { - return new truncate_table_args(this); + public drop_table_with_environment_context_args deepCopy() { + return new drop_table_with_environment_context_args(this); } @Override public void clear() { - this.dbName = null; - this.tableName = null; - this.partNames = null; + this.dbname = null; + this.name = null; + setDeleteDataIsSet(false); + this.deleteData = false; + this.environment_context = null; } - public String getDbName() { - return this.dbName; + public String getDbname() { + return this.dbname; } - public void setDbName(String dbName) { - this.dbName = dbName; + public void setDbname(String dbname) { + this.dbname = dbname; } - public void unsetDbName() { - this.dbName = null; + public void unsetDbname() { + this.dbname = null; } - /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ - public boolean isSetDbName() { - return this.dbName != null; + /** Returns true if field dbname is set (has been assigned a value) and false otherwise */ + public boolean isSetDbname() { + return this.dbname != null; } - public void setDbNameIsSet(boolean value) { + public void setDbnameIsSet(boolean value) { if (!value) { - this.dbName = null; + this.dbname = null; } } - public String getTableName() { - return this.tableName; + public String getName() { + return this.name; } - public void setTableName(String tableName) { - this.tableName = tableName; + public void setName(String name) { + this.name = name; } - public void unsetTableName() { - this.tableName = null; + public void unsetName() { + this.name = null; } - /** Returns true if field tableName is set (has been assigned a 
value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; } - public void setTableNameIsSet(boolean value) { + public void setNameIsSet(boolean value) { if (!value) { - this.tableName = null; + this.name = null; } } - public int getPartNamesSize() { - return (this.partNames == null) ? 0 : this.partNames.size(); + public boolean isDeleteData() { + return this.deleteData; } - public java.util.Iterator getPartNamesIterator() { - return (this.partNames == null) ? null : this.partNames.iterator(); + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + setDeleteDataIsSet(true); } - public void addToPartNames(String elem) { - if (this.partNames == null) { - this.partNames = new ArrayList(); - } - this.partNames.add(elem); + public void unsetDeleteData() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID); } - public List getPartNames() { - return this.partNames; + /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ + public boolean isSetDeleteData() { + return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID); } - public void setPartNames(List partNames) { - this.partNames = partNames; + public void setDeleteDataIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); } - public void unsetPartNames() { - this.partNames = null; + public EnvironmentContext getEnvironment_context() { + return this.environment_context; } - /** Returns true if field partNames is set (has been assigned a value) and false otherwise */ - public boolean isSetPartNames() { - return this.partNames != null; + public void setEnvironment_context(EnvironmentContext environment_context) { + this.environment_context = environment_context; } - public void setPartNamesIsSet(boolean value) { + public void unsetEnvironment_context() { + this.environment_context = null; + } + + /** Returns true if field environment_context is set (has been assigned a value) and false otherwise */ + public boolean isSetEnvironment_context() { + return this.environment_context != null; + } + + public void setEnvironment_contextIsSet(boolean value) { if (!value) { - this.partNames = null; + this.environment_context = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: + case DBNAME: if (value == null) { - unsetDbName(); + unsetDbname(); } else { - setDbName((String)value); + setDbname((String)value); } break; - case TABLE_NAME: + case NAME: if (value == null) { - unsetTableName(); + unsetName(); } else { - setTableName((String)value); + setName((String)value); } break; - case PART_NAMES: + case DELETE_DATA: if (value == null) { - unsetPartNames(); + unsetDeleteData(); } else { - setPartNames((List)value); + setDeleteData((Boolean)value); + } + break; + + case ENVIRONMENT_CONTEXT: + if (value == null) { + unsetEnvironment_context(); + } else { + setEnvironment_context((EnvironmentContext)value); } break; @@ -55369,14 +55556,17 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDbName(); + case DBNAME: + return getDbname(); - case TABLE_NAME: - return getTableName(); + case NAME: + return getName(); - case PART_NAMES: - return getPartNames(); + case 
DELETE_DATA: + return isDeleteData(); + + case ENVIRONMENT_CONTEXT: + return getEnvironment_context(); } throw new IllegalStateException(); @@ -55389,12 +55579,14 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDbName(); - case TABLE_NAME: - return isSetTableName(); - case PART_NAMES: - return isSetPartNames(); + case DBNAME: + return isSetDbname(); + case NAME: + return isSetName(); + case DELETE_DATA: + return isSetDeleteData(); + case ENVIRONMENT_CONTEXT: + return isSetEnvironment_context(); } throw new IllegalStateException(); } @@ -55403,39 +55595,48 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof truncate_table_args) - return this.equals((truncate_table_args)that); + if (that instanceof drop_table_with_environment_context_args) + return this.equals((drop_table_with_environment_context_args)that); return false; } - public boolean equals(truncate_table_args that) { + public boolean equals(drop_table_with_environment_context_args that) { if (that == null) return false; - boolean this_present_dbName = true && this.isSetDbName(); - boolean that_present_dbName = true && that.isSetDbName(); - if (this_present_dbName || that_present_dbName) { - if (!(this_present_dbName && that_present_dbName)) + boolean this_present_dbname = true && this.isSetDbname(); + boolean that_present_dbname = true && that.isSetDbname(); + if (this_present_dbname || that_present_dbname) { + if (!(this_present_dbname && that_present_dbname)) return false; - if (!this.dbName.equals(that.dbName)) + if (!this.dbname.equals(that.dbname)) return false; } - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) return false; - if (!this.tableName.equals(that.tableName)) + if (!this.name.equals(that.name)) return false; } - boolean this_present_partNames = true && this.isSetPartNames(); - boolean that_present_partNames = true && that.isSetPartNames(); - if (this_present_partNames || that_present_partNames) { - if (!(this_present_partNames && that_present_partNames)) + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) return false; - if (!this.partNames.equals(that.partNames)) + if (this.deleteData != that.deleteData) + return false; + } + + boolean this_present_environment_context = true && this.isSetEnvironment_context(); + boolean that_present_environment_context = true && that.isSetEnvironment_context(); + if (this_present_environment_context || that_present_environment_context) { + if (!(this_present_environment_context && that_present_environment_context)) + return false; + if (!this.environment_context.equals(that.environment_context)) return false; } @@ -55446,58 +55647,73 @@ public boolean equals(truncate_table_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_dbName = true && (isSetDbName()); - list.add(present_dbName); - if (present_dbName) - list.add(dbName); + boolean present_dbname = true && (isSetDbname()); + 
list.add(present_dbname); + if (present_dbname) + list.add(dbname); - boolean present_tableName = true && (isSetTableName()); - list.add(present_tableName); - if (present_tableName) - list.add(tableName); + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); - boolean present_partNames = true && (isSetPartNames()); - list.add(present_partNames); - if (present_partNames) - list.add(partNames); + boolean present_deleteData = true; + list.add(present_deleteData); + if (present_deleteData) + list.add(deleteData); + + boolean present_environment_context = true && (isSetEnvironment_context()); + list.add(present_environment_context); + if (present_environment_context) + list.add(environment_context); return list.hashCode(); } @Override - public int compareTo(truncate_table_args other) { + public int compareTo(drop_table_with_environment_context_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname()); if (lastComparison != 0) { return lastComparison; } - if (isSetDbName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (isSetDbname()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); if (lastComparison != 0) { return lastComparison; } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames()); + lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData()); if (lastComparison != 0) { return lastComparison; } - if (isSetPartNames()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames); + if (isSetDeleteData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEnvironment_context()).compareTo(other.isSetEnvironment_context()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEnvironment_context()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environment_context, other.environment_context); if (lastComparison != 0) { return lastComparison; } @@ -55519,30 +55735,34 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("truncate_table_args("); + StringBuilder sb = new StringBuilder("drop_table_with_environment_context_args("); boolean first = true; - sb.append("dbName:"); - if (this.dbName == null) { + sb.append("dbname:"); + if (this.dbname == null) { sb.append("null"); } else { - sb.append(this.dbName); + sb.append(this.dbname); } first = false; if (!first) sb.append(", "); - sb.append("tableName:"); - if (this.tableName == null) { + sb.append("name:"); + if (this.name == null) { sb.append("null"); } else { - sb.append(this.tableName); + sb.append(this.name); } first = false; if (!first) sb.append(", "); - sb.append("partNames:"); - if (this.partNames == null) { + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; + if (!first) sb.append(", "); + sb.append("environment_context:"); + if (this.environment_context == null) { sb.append("null"); } else { - sb.append(this.partNames); + sb.append(this.environment_context); } first = false; sb.append(")"); @@ -55552,6 +55772,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (environment_context != null) { + environment_context.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -55564,21 +55787,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class truncate_table_argsStandardSchemeFactory implements SchemeFactory { - public truncate_table_argsStandardScheme getScheme() { - return new truncate_table_argsStandardScheme(); + private static class drop_table_with_environment_context_argsStandardSchemeFactory implements SchemeFactory { + public drop_table_with_environment_context_argsStandardScheme getScheme() { + return new drop_table_with_environment_context_argsStandardScheme(); } } - private static class truncate_table_argsStandardScheme extends StandardScheme { + private static class drop_table_with_environment_context_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -55588,36 +55813,35 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // DBNAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.dbName = iprot.readString(); - struct.setDbNameIsSet(true); + struct.dbname = iprot.readString(); + struct.setDbnameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TABLE_NAME + case 2: // NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - 
struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); + struct.name = iprot.readString(); + struct.setNameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // PART_NAMES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list948 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list948.size); - String _elem949; - for (int _i950 = 0; _i950 < _list948.size; ++_i950) - { - _elem949 = iprot.readString(); - struct.partNames.add(_elem949); - } - iprot.readListEnd(); - } - struct.setPartNamesIsSet(true); + case 3: // DELETE_DATA + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.deleteData = iprot.readBool(); + struct.setDeleteDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // ENVIRONMENT_CONTEXT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.environment_context = new EnvironmentContext(); + struct.environment_context.read(iprot); + struct.setEnvironment_contextIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -55631,30 +55855,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.dbName != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.dbName); + if (struct.dbname != null) { + oprot.writeFieldBegin(DBNAME_FIELD_DESC); + oprot.writeString(struct.dbname); oprot.writeFieldEnd(); } - if (struct.tableName != null) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.tableName); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); oprot.writeFieldEnd(); } - if (struct.partNames != null) { - oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter951 : struct.partNames) - { - oprot.writeString(_iter951); - } - oprot.writeListEnd(); - } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(struct.deleteData); + oprot.writeFieldEnd(); + if (struct.environment_context != null) { + oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC); + struct.environment_context.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -55663,91 +55883,90 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg } - private static class truncate_table_argsTupleSchemeFactory implements SchemeFactory { - public truncate_table_argsTupleScheme getScheme() { - return new truncate_table_argsTupleScheme(); + private static class drop_table_with_environment_context_argsTupleSchemeFactory implements SchemeFactory { + public drop_table_with_environment_context_argsTupleScheme getScheme() { + return new drop_table_with_environment_context_argsTupleScheme(); } } - private static class truncate_table_argsTupleScheme extends TupleScheme { + private static class drop_table_with_environment_context_argsTupleScheme extends 
TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDbName()) { + if (struct.isSetDbname()) { optionals.set(0); } - if (struct.isSetTableName()) { + if (struct.isSetName()) { optionals.set(1); } - if (struct.isSetPartNames()) { + if (struct.isSetDeleteData()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); - if (struct.isSetDbName()) { - oprot.writeString(struct.dbName); + if (struct.isSetEnvironment_context()) { + optionals.set(3); } - if (struct.isSetTableName()) { - oprot.writeString(struct.tableName); + oprot.writeBitSet(optionals, 4); + if (struct.isSetDbname()) { + oprot.writeString(struct.dbname); } - if (struct.isSetPartNames()) { - { - oprot.writeI32(struct.partNames.size()); - for (String _iter952 : struct.partNames) - { - oprot.writeString(_iter952); - } - } + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + if (struct.isSetDeleteData()) { + oprot.writeBool(struct.deleteData); + } + if (struct.isSetEnvironment_context()) { + struct.environment_context.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.dbName = iprot.readString(); - struct.setDbNameIsSet(true); + struct.dbname = iprot.readString(); + struct.setDbnameIsSet(true); } if (incoming.get(1)) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); + struct.name = iprot.readString(); + struct.setNameIsSet(true); } if (incoming.get(2)) { - { - org.apache.thrift.protocol.TList _list953 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list953.size); - String _elem954; - for (int _i955 = 0; _i955 < _list953.size; ++_i955) - { - _elem954 = iprot.readString(); - struct.partNames.add(_elem954); - } - } - struct.setPartNamesIsSet(true); + struct.deleteData = iprot.readBool(); + struct.setDeleteDataIsSet(true); + } + if (incoming.get(3)) { + struct.environment_context = new EnvironmentContext(); + struct.environment_context.read(iprot); + struct.setEnvironment_contextIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_table_with_environment_context_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("drop_table_with_environment_context_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new truncate_table_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new truncate_table_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_table_with_environment_context_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_table_with_environment_context_resultTupleSchemeFactory()); } - private MetaException o1; // required + private NoSuchObjectException o1; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"); + O1((short)1, "o1"), + O3((short)2, "o3"); private static final Map byName = new HashMap(); @@ -55764,6 +55983,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // O1 return O1; + case 2: // O3 + return O3; default: return null; } @@ -55809,43 +56030,51 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_table_with_environment_context_result.class, metaDataMap); } - public truncate_table_result() { + public drop_table_with_environment_context_result() { } - public truncate_table_result( - MetaException o1) + public drop_table_with_environment_context_result( + NoSuchObjectException o1, + MetaException o3) { this(); this.o1 = o1; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public truncate_table_result(truncate_table_result other) { + public drop_table_with_environment_context_result(drop_table_with_environment_context_result other) { if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } - public truncate_table_result deepCopy() { - return new truncate_table_result(this); + public drop_table_with_environment_context_result deepCopy() { + return new drop_table_with_environment_context_result(this); } @Override public void clear() { this.o1 = null; + this.o3 = null; } - public MetaException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(MetaException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -55864,13 +56093,44 @@ public void setO1IsSet(boolean value) { } } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case O1: if (value == null) { unsetO1(); } else { - setO1((MetaException)value); + setO1((NoSuchObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -55882,6 +56142,9 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -55895,6 +56158,8 @@ public boolean isSet(_Fields field) { switch (field) { case O1: return isSetO1(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -55903,12 +56168,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof truncate_table_result) - return this.equals((truncate_table_result)that); + if (that instanceof drop_table_with_environment_context_result) + return this.equals((drop_table_with_environment_context_result)that); return false; } - public boolean equals(truncate_table_result that) { + public boolean equals(drop_table_with_environment_context_result that) { if (that == null) return false; @@ -55921,6 +56186,15 @@ public boolean equals(truncate_table_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -55933,11 +56207,16 @@ public int hashCode() { if (present_o1) list.add(o1); + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(truncate_table_result other) { + public int compareTo(drop_table_with_environment_context_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -55954,6 +56233,16 @@ public int compareTo(truncate_table_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return 
lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -55971,7 +56260,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("truncate_table_result("); + StringBuilder sb = new StringBuilder("drop_table_with_environment_context_result("); boolean first = true; sb.append("o1:"); @@ -55981,6 +56270,14 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -56006,15 +56303,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class truncate_table_resultStandardSchemeFactory implements SchemeFactory { - public truncate_table_resultStandardScheme getScheme() { - return new truncate_table_resultStandardScheme(); + private static class drop_table_with_environment_context_resultStandardSchemeFactory implements SchemeFactory { + public drop_table_with_environment_context_resultStandardScheme getScheme() { + return new drop_table_with_environment_context_resultStandardScheme(); } } - private static class truncate_table_resultStandardScheme extends StandardScheme { + private static class drop_table_with_environment_context_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -56026,13 +56323,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_resu switch (schemeField.id) { case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -56042,7 +56348,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_resu struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -56051,66 +56357,85 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_res struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class 
truncate_table_resultTupleSchemeFactory implements SchemeFactory { - public truncate_table_resultTupleScheme getScheme() { - return new truncate_table_resultTupleScheme(); + private static class drop_table_with_environment_context_resultTupleSchemeFactory implements SchemeFactory { + public drop_table_with_environment_context_resultTupleScheme getScheme() { + return new drop_table_with_environment_context_resultTupleScheme(); } } - private static class truncate_table_resultTupleScheme extends TupleScheme { + private static class drop_table_with_environment_context_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO3()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_table_with_environment_context_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(1)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("pattern", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", 
org.apache.thrift.protocol.TType.LIST, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_tables_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_tables_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new truncate_table_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new truncate_table_argsTupleSchemeFactory()); } - private String db_name; // required - private String pattern; // required + private String dbName; // required + private String tableName; // required + private List partNames; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - PATTERN((short)2, "pattern"); + DB_NAME((short)1, "dbName"), + TABLE_NAME((short)2, "tableName"), + PART_NAMES((short)3, "partNames"); private static final Map byName = new HashMap(); @@ -56127,8 +56452,10 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // DB_NAME return DB_NAME; - case 2: // PATTERN - return PATTERN; + case 2: // TABLE_NAME + return TABLE_NAME; + case 3: // PART_NAMES + return PART_NAMES; default: return null; } @@ -56172,91 +56499,139 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PATTERN, new org.apache.thrift.meta_data.FieldMetaData("pattern", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_args.class, metaDataMap); } - public get_tables_args() { + public truncate_table_args() { } - public get_tables_args( - String db_name, - String pattern) + public truncate_table_args( + String dbName, + String tableName, + List partNames) { this(); - this.db_name = db_name; - this.pattern = pattern; + this.dbName = dbName; + this.tableName = tableName; + this.partNames = partNames; } /** * Performs a deep copy on other. 
*/ - public get_tables_args(get_tables_args other) { - if (other.isSetDb_name()) { - this.db_name = other.db_name; + public truncate_table_args(truncate_table_args other) { + if (other.isSetDbName()) { + this.dbName = other.dbName; } - if (other.isSetPattern()) { - this.pattern = other.pattern; + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + if (other.isSetPartNames()) { + List __this__partNames = new ArrayList(other.partNames); + this.partNames = __this__partNames; } } - public get_tables_args deepCopy() { - return new get_tables_args(this); + public truncate_table_args deepCopy() { + return new truncate_table_args(this); } @Override public void clear() { - this.db_name = null; - this.pattern = null; + this.dbName = null; + this.tableName = null; + this.partNames = null; } - public String getDb_name() { - return this.db_name; + public String getDbName() { + return this.dbName; } - public void setDb_name(String db_name) { - this.db_name = db_name; + public void setDbName(String dbName) { + this.dbName = dbName; } - public void unsetDb_name() { - this.db_name = null; + public void unsetDbName() { + this.dbName = null; } - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; } - public void setDb_nameIsSet(boolean value) { + public void setDbNameIsSet(boolean value) { if (!value) { - this.db_name = null; + this.dbName = null; } } - public String getPattern() { - return this.pattern; + public String getTableName() { + return this.tableName; } - public void setPattern(String pattern) { - this.pattern = pattern; + public void setTableName(String tableName) { + this.tableName = tableName; } - public void unsetPattern() { - this.pattern = null; + public void unsetTableName() { + this.tableName = null; } - /** Returns true if field pattern is set (has been assigned a value) and false otherwise */ - public boolean isSetPattern() { - return this.pattern != null; + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; } - public void setPatternIsSet(boolean value) { + public void setTableNameIsSet(boolean value) { if (!value) { - this.pattern = null; + this.tableName = null; + } + } + + public int getPartNamesSize() { + return (this.partNames == null) ? 0 : this.partNames.size(); + } + + public java.util.Iterator getPartNamesIterator() { + return (this.partNames == null) ? 
null : this.partNames.iterator(); + } + + public void addToPartNames(String elem) { + if (this.partNames == null) { + this.partNames = new ArrayList(); + } + this.partNames.add(elem); + } + + public List getPartNames() { + return this.partNames; + } + + public void setPartNames(List partNames) { + this.partNames = partNames; + } + + public void unsetPartNames() { + this.partNames = null; + } + + /** Returns true if field partNames is set (has been assigned a value) and false otherwise */ + public boolean isSetPartNames() { + return this.partNames != null; + } + + public void setPartNamesIsSet(boolean value) { + if (!value) { + this.partNames = null; } } @@ -56264,17 +56639,25 @@ public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: if (value == null) { - unsetDb_name(); + unsetDbName(); } else { - setDb_name((String)value); + setDbName((String)value); } break; - case PATTERN: + case TABLE_NAME: if (value == null) { - unsetPattern(); + unsetTableName(); } else { - setPattern((String)value); + setTableName((String)value); + } + break; + + case PART_NAMES: + if (value == null) { + unsetPartNames(); + } else { + setPartNames((List)value); } break; @@ -56284,10 +56667,13 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case DB_NAME: - return getDb_name(); + return getDbName(); - case PATTERN: - return getPattern(); + case TABLE_NAME: + return getTableName(); + + case PART_NAMES: + return getPartNames(); } throw new IllegalStateException(); @@ -56301,9 +56687,11 @@ public boolean isSet(_Fields field) { switch (field) { case DB_NAME: - return isSetDb_name(); - case PATTERN: - return isSetPattern(); + return isSetDbName(); + case TABLE_NAME: + return isSetTableName(); + case PART_NAMES: + return isSetPartNames(); } throw new IllegalStateException(); } @@ -56312,30 +56700,39 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_tables_args) - return this.equals((get_tables_args)that); + if (that instanceof truncate_table_args) + return this.equals((truncate_table_args)that); return false; } - public boolean equals(get_tables_args that) { + public boolean equals(truncate_table_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) return false; - if (!this.db_name.equals(that.db_name)) + if (!this.dbName.equals(that.dbName)) return false; } - boolean this_present_pattern = true && this.isSetPattern(); - boolean that_present_pattern = true && that.isSetPattern(); - if (this_present_pattern || that_present_pattern) { - if (!(this_present_pattern && that_present_pattern)) + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) return false; - if (!this.pattern.equals(that.pattern)) + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_partNames = true 
&& this.isSetPartNames(); + boolean that_present_partNames = true && that.isSetPartNames(); + if (this_present_partNames || that_present_partNames) { + if (!(this_present_partNames && that_present_partNames)) + return false; + if (!this.partNames.equals(that.partNames)) return false; } @@ -56346,43 +56743,58 @@ public boolean equals(get_tables_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); - boolean present_pattern = true && (isSetPattern()); - list.add(present_pattern); - if (present_pattern) - list.add(pattern); + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + boolean present_partNames = true && (isSetPartNames()); + list.add(present_partNames); + if (present_partNames) + list.add(partNames); return list.hashCode(); } @Override - public int compareTo(get_tables_args other) { + public int compareTo(truncate_table_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetPattern()).compareTo(other.isSetPattern()); + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); if (lastComparison != 0) { return lastComparison; } - if (isSetPattern()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pattern, other.pattern); + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartNames()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames); if (lastComparison != 0) { return lastComparison; } @@ -56404,22 +56816,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_tables_args("); + StringBuilder sb = new StringBuilder("truncate_table_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { + sb.append("dbName:"); + if (this.dbName == null) { sb.append("null"); } else { - sb.append(this.db_name); + sb.append(this.dbName); } first = false; if (!first) sb.append(", "); - sb.append("pattern:"); - if (this.pattern == null) { + sb.append("tableName:"); + if (this.tableName == null) { sb.append("null"); } else { - sb.append(this.pattern); + sb.append(this.tableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("partNames:"); + if (this.partNames == null) { + sb.append("null"); + } else { + sb.append(this.partNames); } first = false; sb.append(")"); @@ -56447,15 +56867,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_tables_argsStandardSchemeFactory implements SchemeFactory { - public get_tables_argsStandardScheme getScheme() { - return new get_tables_argsStandardScheme(); + private static class truncate_table_argsStandardSchemeFactory implements SchemeFactory { + public truncate_table_argsStandardScheme getScheme() { + return new truncate_table_argsStandardScheme(); } } - private static class get_tables_argsStandardScheme extends StandardScheme { + private static class truncate_table_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -56467,16 +56887,34 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_args str switch (schemeField.id) { case 1: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // PATTERN + case 2: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.pattern = iprot.readString(); - struct.setPatternIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PART_NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list972 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list972.size); + String _elem973; + for (int _i974 = 0; _i974 < _list972.size; ++_i974) + { + _elem973 = iprot.readString(); + struct.partNames.add(_elem973); + } + iprot.readListEnd(); + } + struct.setPartNamesIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -56490,18 +56928,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_args struct) throws org.apache.thrift.TException { struct.validate(); 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { + if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); + oprot.writeString(struct.dbName); oprot.writeFieldEnd(); } - if (struct.pattern != null) { - oprot.writeFieldBegin(PATTERN_FIELD_DESC); - oprot.writeString(struct.pattern); + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + if (struct.partNames != null) { + oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); + for (String _iter975 : struct.partNames) + { + oprot.writeString(_iter975); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -56510,68 +56960,90 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_args st } - private static class get_tables_argsTupleSchemeFactory implements SchemeFactory { - public get_tables_argsTupleScheme getScheme() { - return new get_tables_argsTupleScheme(); + private static class truncate_table_argsTupleSchemeFactory implements SchemeFactory { + public truncate_table_argsTupleScheme getScheme() { + return new truncate_table_argsTupleScheme(); } } - private static class get_tables_argsTupleScheme extends TupleScheme { + private static class truncate_table_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetDbName()) { optionals.set(0); } - if (struct.isSetPattern()) { + if (struct.isSetTableName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); + if (struct.isSetPartNames()) { + optionals.set(2); } - if (struct.isSetPattern()) { - oprot.writeString(struct.pattern); + oprot.writeBitSet(optionals, 3); + if (struct.isSetDbName()) { + oprot.writeString(struct.dbName); + } + if (struct.isSetTableName()) { + oprot.writeString(struct.tableName); + } + if (struct.isSetPartNames()) { + { + oprot.writeI32(struct.partNames.size()); + for (String _iter976 : struct.partNames) + { + oprot.writeString(_iter976); + } + } } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); } if (incoming.get(1)) { - struct.pattern = iprot.readString(); - struct.setPatternIsSet(true); + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list977 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list977.size); + String 
_elem978; + for (int _i979 = 0; _i979 < _list977.size; ++_i979) + { + _elem978 = iprot.readString(); + struct.partNames.add(_elem978); + } + } + struct.setPartNamesIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("truncate_table_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_tables_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_tables_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new truncate_table_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new truncate_table_resultTupleSchemeFactory()); } - private List success; // required private MetaException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), O1((short)1, "o1"); private static final Map byName = new HashMap(); @@ -56587,8 +57059,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_args stru */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; case 1: // O1 return O1; default: @@ -56634,88 +57104,40 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(truncate_table_result.class, metaDataMap); } - public get_tables_result() { + public truncate_table_result() { } - public get_tables_result( - List success, + public truncate_table_result( MetaException o1) { this(); - this.success = success; this.o1 = o1; } /** * Performs a deep copy on other. */ - public get_tables_result(get_tables_result other) { - if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success); - this.success = __this__success; - } + public truncate_table_result(truncate_table_result other) { if (other.isSetO1()) { this.o1 = new MetaException(other.o1); } } - public get_tables_result deepCopy() { - return new get_tables_result(this); + public truncate_table_result deepCopy() { + return new truncate_table_result(this); } @Override public void clear() { - this.success = null; this.o1 = null; } - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? 
null : this.success.iterator(); - } - - public void addToSuccess(String elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { - return this.success; - } - - public void setSuccess(List success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - public MetaException getO1() { return this.o1; } @@ -56741,14 +57163,6 @@ public void setO1IsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((List)value); - } - break; - case O1: if (value == null) { unsetO1(); @@ -56762,9 +57176,6 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); - case O1: return getO1(); @@ -56779,8 +57190,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); case O1: return isSetO1(); } @@ -56791,24 +57200,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_tables_result) - return this.equals((get_tables_result)that); + if (that instanceof truncate_table_result) + return this.equals((truncate_table_result)that); return false; } - public boolean equals(get_tables_result that) { + public boolean equals(truncate_table_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -56825,11 +57225,6 @@ public boolean equals(get_tables_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -56839,23 +57234,13 @@ public int hashCode() { } @Override - public int compareTo(get_tables_result other) { + public int compareTo(truncate_table_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -56883,17 +57268,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_tables_result("); + StringBuilder sb = new StringBuilder("truncate_table_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -56926,15 +57303,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_tables_resultStandardSchemeFactory implements SchemeFactory { - public get_tables_resultStandardScheme getScheme() { - return new get_tables_resultStandardScheme(); + private static class truncate_table_resultStandardSchemeFactory implements SchemeFactory { + public truncate_table_resultStandardScheme getScheme() { + return new truncate_table_resultStandardScheme(); } } - private static class get_tables_resultStandardScheme extends StandardScheme { + private static class truncate_table_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -56944,24 +57321,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list956 = iprot.readListBegin(); - struct.success = new ArrayList(_list956.size); - String _elem957; - for (int _i958 = 0; _i958 < _list956.size; ++_i958) - { - _elem957 = iprot.readString(); - struct.success.add(_elem957); - } - iprot.readListEnd(); - } - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o1 = new MetaException(); @@ -56980,22 +57339,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter959 : struct.success) - { - oprot.writeString(_iter959); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -57007,57 +57354,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result } - private static class get_tables_resultTupleSchemeFactory implements SchemeFactory { - public get_tables_resultTupleScheme getScheme() { - return new get_tables_resultTupleScheme(); + private static class truncate_table_resultTupleSchemeFactory implements SchemeFactory { + public truncate_table_resultTupleScheme getScheme() { + return new 
truncate_table_resultTupleScheme(); } } - private static class get_tables_resultTupleScheme extends TupleScheme { + private static class truncate_table_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } if (struct.isSetO1()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (String _iter960 : struct.success) - { - oprot.writeString(_iter960); - } - } + optionals.set(0); } + oprot.writeBitSet(optionals, 1); if (struct.isSetO1()) { struct.o1.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list961 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list961.size); - String _elem962; - for (int _i963 = 0; _i963 < _list961.size; ++_i963) - { - _elem962 = iprot.readString(); - struct.success.add(_elem962); - } - } - struct.setSuccessIsSet(true); - } - if (incoming.get(1)) { struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); @@ -57067,28 +57389,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_by_type_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_by_type_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("pattern", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_tables_by_type_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_tables_by_type_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, 
new get_tables_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_tables_argsTupleSchemeFactory()); } private String db_name; // required private String pattern; // required - private String tableType; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - PATTERN((short)2, "pattern"), - TABLE_TYPE((short)3, "tableType"); + PATTERN((short)2, "pattern"); private static final Map byName = new HashMap(); @@ -57107,8 +57426,6 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // PATTERN return PATTERN; - case 3: // TABLE_TYPE - return TABLE_TYPE; default: return null; } @@ -57156,50 +57473,42 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PATTERN, new org.apache.thrift.meta_data.FieldMetaData("pattern", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_by_type_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_args.class, metaDataMap); } - public get_tables_by_type_args() { + public get_tables_args() { } - public get_tables_by_type_args( + public get_tables_args( String db_name, - String pattern, - String tableType) + String pattern) { this(); this.db_name = db_name; this.pattern = pattern; - this.tableType = tableType; } /** * Performs a deep copy on other. 
*/ - public get_tables_by_type_args(get_tables_by_type_args other) { + public get_tables_args(get_tables_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } if (other.isSetPattern()) { this.pattern = other.pattern; } - if (other.isSetTableType()) { - this.tableType = other.tableType; - } } - public get_tables_by_type_args deepCopy() { - return new get_tables_by_type_args(this); + public get_tables_args deepCopy() { + return new get_tables_args(this); } @Override public void clear() { this.db_name = null; this.pattern = null; - this.tableType = null; } public String getDb_name() { @@ -57248,29 +57557,6 @@ public void setPatternIsSet(boolean value) { } } - public String getTableType() { - return this.tableType; - } - - public void setTableType(String tableType) { - this.tableType = tableType; - } - - public void unsetTableType() { - this.tableType = null; - } - - /** Returns true if field tableType is set (has been assigned a value) and false otherwise */ - public boolean isSetTableType() { - return this.tableType != null; - } - - public void setTableTypeIsSet(boolean value) { - if (!value) { - this.tableType = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -57289,14 +57575,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case TABLE_TYPE: - if (value == null) { - unsetTableType(); - } else { - setTableType((String)value); - } - break; - } } @@ -57308,9 +57586,6 @@ public Object getFieldValue(_Fields field) { case PATTERN: return getPattern(); - case TABLE_TYPE: - return getTableType(); - } throw new IllegalStateException(); } @@ -57326,8 +57601,6 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case PATTERN: return isSetPattern(); - case TABLE_TYPE: - return isSetTableType(); } throw new IllegalStateException(); } @@ -57336,12 +57609,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_tables_by_type_args) - return this.equals((get_tables_by_type_args)that); + if (that instanceof get_tables_args) + return this.equals((get_tables_args)that); return false; } - public boolean equals(get_tables_by_type_args that) { + public boolean equals(get_tables_args that) { if (that == null) return false; @@ -57363,15 +57636,6 @@ public boolean equals(get_tables_by_type_args that) { return false; } - boolean this_present_tableType = true && this.isSetTableType(); - boolean that_present_tableType = true && that.isSetTableType(); - if (this_present_tableType || that_present_tableType) { - if (!(this_present_tableType && that_present_tableType)) - return false; - if (!this.tableType.equals(that.tableType)) - return false; - } - return true; } @@ -57389,16 +57653,11 @@ public int hashCode() { if (present_pattern) list.add(pattern); - boolean present_tableType = true && (isSetTableType()); - list.add(present_tableType); - if (present_tableType) - list.add(tableType); - return list.hashCode(); } @Override - public int compareTo(get_tables_by_type_args other) { + public int compareTo(get_tables_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -57425,16 +57684,6 @@ public int compareTo(get_tables_by_type_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetTableType()).compareTo(other.isSetTableType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableType()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.tableType, other.tableType); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -57452,7 +57701,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_tables_by_type_args("); + StringBuilder sb = new StringBuilder("get_tables_args("); boolean first = true; sb.append("db_name:"); @@ -57470,14 +57719,6 @@ public String toString() { sb.append(this.pattern); } first = false; - if (!first) sb.append(", "); - sb.append("tableType:"); - if (this.tableType == null) { - sb.append("null"); - } else { - sb.append(this.tableType); - } - first = false; sb.append(")"); return sb.toString(); } @@ -57503,15 +57744,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_tables_by_type_argsStandardSchemeFactory implements SchemeFactory { - public get_tables_by_type_argsStandardScheme getScheme() { - return new get_tables_by_type_argsStandardScheme(); + private static class get_tables_argsStandardSchemeFactory implements SchemeFactory { + public get_tables_argsStandardScheme getScheme() { + return new get_tables_argsStandardScheme(); } } - private static class get_tables_by_type_argsStandardScheme extends StandardScheme { + private static class get_tables_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -57537,14 +57778,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // TABLE_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableType = iprot.readString(); - struct.setTableTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -57554,7 +57787,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -57568,27 +57801,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeString(struct.pattern); oprot.writeFieldEnd(); } - if (struct.tableType != null) { - oprot.writeFieldBegin(TABLE_TYPE_FIELD_DESC); - oprot.writeString(struct.tableType); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_tables_by_type_argsTupleSchemeFactory implements SchemeFactory { - public get_tables_by_type_argsTupleScheme getScheme() { - return new get_tables_by_type_argsTupleScheme(); + private static class get_tables_argsTupleSchemeFactory implements SchemeFactory { + public get_tables_argsTupleScheme getScheme() { + return new get_tables_argsTupleScheme(); } } - private static class 
get_tables_by_type_argsTupleScheme extends TupleScheme { + private static class get_tables_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -57597,25 +57825,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetPattern()) { optionals.set(1); } - if (struct.isSetTableType()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); + oprot.writeBitSet(optionals, 2); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetPattern()) { oprot.writeString(struct.pattern); } - if (struct.isSetTableType()) { - oprot.writeString(struct.tableType); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -57624,25 +57846,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_a struct.pattern = iprot.readString(); struct.setPatternIsSet(true); } - if (incoming.get(2)) { - struct.tableType = iprot.readString(); - struct.setTableTypeIsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_by_type_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_by_type_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_tables_by_type_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_tables_by_type_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_tables_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_tables_resultTupleSchemeFactory()); } private List success; // required @@ -57719,13 +57937,13 @@ public String getFieldName() { tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_by_type_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_result.class, metaDataMap); } - public get_tables_by_type_result() { + public get_tables_result() { } - public get_tables_by_type_result( + public get_tables_result( List success, MetaException o1) { @@ -57737,7 +57955,7 @@ public get_tables_by_type_result( /** * Performs a deep copy on other. */ - public get_tables_by_type_result(get_tables_by_type_result other) { + public get_tables_result(get_tables_result other) { if (other.isSetSuccess()) { List __this__success = new ArrayList(other.success); this.success = __this__success; @@ -57747,8 +57965,8 @@ public get_tables_by_type_result(get_tables_by_type_result other) { } } - public get_tables_by_type_result deepCopy() { - return new get_tables_by_type_result(this); + public get_tables_result deepCopy() { + return new get_tables_result(this); } @Override @@ -57870,12 +58088,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_tables_by_type_result) - return this.equals((get_tables_by_type_result)that); + if (that instanceof get_tables_result) + return this.equals((get_tables_result)that); return false; } - public boolean equals(get_tables_by_type_result that) { + public boolean equals(get_tables_result that) { if (that == null) return false; @@ -57918,7 +58136,7 @@ public int hashCode() { } @Override - public int compareTo(get_tables_by_type_result other) { + public int compareTo(get_tables_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -57962,7 +58180,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_tables_by_type_result("); + StringBuilder sb = new StringBuilder("get_tables_result("); boolean first = true; sb.append("success:"); @@ -58005,15 +58223,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_tables_by_type_resultStandardSchemeFactory implements SchemeFactory { - public get_tables_by_type_resultStandardScheme getScheme() { - return new get_tables_by_type_resultStandardScheme(); + private static class get_tables_resultStandardSchemeFactory implements SchemeFactory { + public get_tables_resultStandardScheme getScheme() { + return new get_tables_resultStandardScheme(); } } - private static class get_tables_by_type_resultStandardScheme extends StandardScheme { + private static class get_tables_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -58026,13 +58244,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list964 = iprot.readListBegin(); - struct.success = new ArrayList(_list964.size); - String _elem965; - for (int _i966 = 0; _i966 < _list964.size; ++_i966) + org.apache.thrift.protocol.TList _list980 = iprot.readListBegin(); + struct.success = new ArrayList(_list980.size); + String _elem981; + for (int _i982 = 0; _i982 < _list980.size; ++_i982) { - _elem965 = iprot.readString(); - struct.success.add(_elem965); + _elem981 = iprot.readString(); + struct.success.add(_elem981); } iprot.readListEnd(); } @@ -58059,7 +58277,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -58067,9 +58285,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter967 : struct.success) + for (String _iter983 : struct.success) { - oprot.writeString(_iter967); + oprot.writeString(_iter983); } oprot.writeListEnd(); } @@ -58086,16 +58304,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type } - private static class get_tables_by_type_resultTupleSchemeFactory implements SchemeFactory { - public get_tables_by_type_resultTupleScheme getScheme() { - return new get_tables_by_type_resultTupleScheme(); + private static class get_tables_resultTupleSchemeFactory implements SchemeFactory { + public get_tables_resultTupleScheme getScheme() { + return new get_tables_resultTupleScheme(); } } - private static class get_tables_by_type_resultTupleScheme extends TupleScheme { + private static class get_tables_resultTupleScheme extends TupleScheme { @Override - public 
void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -58108,9 +58326,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter968 : struct.success) + for (String _iter984 : struct.success) { - oprot.writeString(_iter968); + oprot.writeString(_iter984); } } } @@ -58120,18 +58338,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list969 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list969.size); - String _elem970; - for (int _i971 = 0; _i971 < _list969.size; ++_i971) + org.apache.thrift.protocol.TList _list985 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list985.size); + String _elem986; + for (int _i987 = 0; _i987 < _list985.size; ++_i987) { - _elem970 = iprot.readString(); - struct.success.add(_elem970); + _elem986 = iprot.readString(); + struct.success.add(_elem986); } } struct.setSuccessIsSet(true); @@ -58146,22 +58364,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialized_views_for_rewriting_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialized_views_for_rewriting_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_by_type_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_by_type_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("pattern", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_materialized_views_for_rewriting_argsStandardSchemeFactory()); - 
schemes.put(TupleScheme.class, new get_materialized_views_for_rewriting_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_tables_by_type_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_tables_by_type_argsTupleSchemeFactory()); } private String db_name; // required + private String pattern; // required + private String tableType; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"); + DB_NAME((short)1, "db_name"), + PATTERN((short)2, "pattern"), + TABLE_TYPE((short)3, "tableType"); private static final Map byName = new HashMap(); @@ -58178,6 +58402,10 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // DB_NAME return DB_NAME; + case 2: // PATTERN + return PATTERN; + case 3: // TABLE_TYPE + return TABLE_TYPE; default: return null; } @@ -58223,36 +58451,52 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PATTERN, new org.apache.thrift.meta_data.FieldMetaData("pattern", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialized_views_for_rewriting_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_by_type_args.class, metaDataMap); } - public get_materialized_views_for_rewriting_args() { + public get_tables_by_type_args() { } - public get_materialized_views_for_rewriting_args( - String db_name) + public get_tables_by_type_args( + String db_name, + String pattern, + String tableType) { this(); this.db_name = db_name; + this.pattern = pattern; + this.tableType = tableType; } /** * Performs a deep copy on other. 
*/ - public get_materialized_views_for_rewriting_args(get_materialized_views_for_rewriting_args other) { + public get_tables_by_type_args(get_tables_by_type_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } + if (other.isSetPattern()) { + this.pattern = other.pattern; + } + if (other.isSetTableType()) { + this.tableType = other.tableType; + } } - public get_materialized_views_for_rewriting_args deepCopy() { - return new get_materialized_views_for_rewriting_args(this); + public get_tables_by_type_args deepCopy() { + return new get_tables_by_type_args(this); } @Override public void clear() { this.db_name = null; + this.pattern = null; + this.tableType = null; } public String getDb_name() { @@ -58278,6 +58522,52 @@ public void setDb_nameIsSet(boolean value) { } } + public String getPattern() { + return this.pattern; + } + + public void setPattern(String pattern) { + this.pattern = pattern; + } + + public void unsetPattern() { + this.pattern = null; + } + + /** Returns true if field pattern is set (has been assigned a value) and false otherwise */ + public boolean isSetPattern() { + return this.pattern != null; + } + + public void setPatternIsSet(boolean value) { + if (!value) { + this.pattern = null; + } + } + + public String getTableType() { + return this.tableType; + } + + public void setTableType(String tableType) { + this.tableType = tableType; + } + + public void unsetTableType() { + this.tableType = null; + } + + /** Returns true if field tableType is set (has been assigned a value) and false otherwise */ + public boolean isSetTableType() { + return this.tableType != null; + } + + public void setTableTypeIsSet(boolean value) { + if (!value) { + this.tableType = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -58288,6 +58578,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case PATTERN: + if (value == null) { + unsetPattern(); + } else { + setPattern((String)value); + } + break; + + case TABLE_TYPE: + if (value == null) { + unsetTableType(); + } else { + setTableType((String)value); + } + break; + } } @@ -58296,6 +58602,12 @@ public Object getFieldValue(_Fields field) { case DB_NAME: return getDb_name(); + case PATTERN: + return getPattern(); + + case TABLE_TYPE: + return getTableType(); + } throw new IllegalStateException(); } @@ -58309,6 +58621,10 @@ public boolean isSet(_Fields field) { switch (field) { case DB_NAME: return isSetDb_name(); + case PATTERN: + return isSetPattern(); + case TABLE_TYPE: + return isSetTableType(); } throw new IllegalStateException(); } @@ -58317,12 +58633,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_materialized_views_for_rewriting_args) - return this.equals((get_materialized_views_for_rewriting_args)that); + if (that instanceof get_tables_by_type_args) + return this.equals((get_tables_by_type_args)that); return false; } - public boolean equals(get_materialized_views_for_rewriting_args that) { + public boolean equals(get_tables_by_type_args that) { if (that == null) return false; @@ -58335,6 +58651,24 @@ public boolean equals(get_materialized_views_for_rewriting_args that) { return false; } + boolean this_present_pattern = true && this.isSetPattern(); + boolean that_present_pattern = true && that.isSetPattern(); + if (this_present_pattern || that_present_pattern) { + if (!(this_present_pattern && that_present_pattern)) + return false; + if 
(!this.pattern.equals(that.pattern)) + return false; + } + + boolean this_present_tableType = true && this.isSetTableType(); + boolean that_present_tableType = true && that.isSetTableType(); + if (this_present_tableType || that_present_tableType) { + if (!(this_present_tableType && that_present_tableType)) + return false; + if (!this.tableType.equals(that.tableType)) + return false; + } + return true; } @@ -58347,11 +58681,21 @@ public int hashCode() { if (present_db_name) list.add(db_name); + boolean present_pattern = true && (isSetPattern()); + list.add(present_pattern); + if (present_pattern) + list.add(pattern); + + boolean present_tableType = true && (isSetTableType()); + list.add(present_tableType); + if (present_tableType) + list.add(tableType); + return list.hashCode(); } @Override - public int compareTo(get_materialized_views_for_rewriting_args other) { + public int compareTo(get_tables_by_type_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -58368,6 +58712,26 @@ public int compareTo(get_materialized_views_for_rewriting_args other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetPattern()).compareTo(other.isSetPattern()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPattern()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pattern, other.pattern); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableType()).compareTo(other.isSetTableType()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableType()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableType, other.tableType); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -58385,7 +58749,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_materialized_views_for_rewriting_args("); + StringBuilder sb = new StringBuilder("get_tables_by_type_args("); boolean first = true; sb.append("db_name:"); @@ -58395,6 +58759,22 @@ public String toString() { sb.append(this.db_name); } first = false; + if (!first) sb.append(", "); + sb.append("pattern:"); + if (this.pattern == null) { + sb.append("null"); + } else { + sb.append(this.pattern); + } + first = false; + if (!first) sb.append(", "); + sb.append("tableType:"); + if (this.tableType == null) { + sb.append("null"); + } else { + sb.append(this.tableType); + } + first = false; sb.append(")"); return sb.toString(); } @@ -58420,15 +58800,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_materialized_views_for_rewriting_argsStandardSchemeFactory implements SchemeFactory { - public get_materialized_views_for_rewriting_argsStandardScheme getScheme() { - return new get_materialized_views_for_rewriting_argsStandardScheme(); + private static class get_tables_by_type_argsStandardSchemeFactory implements SchemeFactory { + public get_tables_by_type_argsStandardScheme getScheme() { + return new get_tables_by_type_argsStandardScheme(); } } - private static class get_materialized_views_for_rewriting_argsStandardScheme extends StandardScheme { + private static class get_tables_by_type_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -58446,6 +58826,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // PATTERN + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.pattern = iprot.readString(); + struct.setPatternIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TABLE_TYPE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableType = iprot.readString(); + struct.setTableTypeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -58455,7 +58851,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -58464,56 +58860,86 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeString(struct.db_name); oprot.writeFieldEnd(); } + if (struct.pattern != null) { + oprot.writeFieldBegin(PATTERN_FIELD_DESC); + oprot.writeString(struct.pattern); + oprot.writeFieldEnd(); + } + if (struct.tableType != null) { + oprot.writeFieldBegin(TABLE_TYPE_FIELD_DESC); + oprot.writeString(struct.tableType); + 
oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_materialized_views_for_rewriting_argsTupleSchemeFactory implements SchemeFactory { - public get_materialized_views_for_rewriting_argsTupleScheme getScheme() { - return new get_materialized_views_for_rewriting_argsTupleScheme(); + private static class get_tables_by_type_argsTupleSchemeFactory implements SchemeFactory { + public get_tables_by_type_argsTupleScheme getScheme() { + return new get_tables_by_type_argsTupleScheme(); } } - private static class get_materialized_views_for_rewriting_argsTupleScheme extends TupleScheme { + private static class get_tables_by_type_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetPattern()) { + optionals.set(1); + } + if (struct.isSetTableType()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } + if (struct.isSetPattern()) { + oprot.writeString(struct.pattern); + } + if (struct.isSetTableType()) { + oprot.writeString(struct.tableType); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); } + if (incoming.get(1)) { + struct.pattern = iprot.readString(); + struct.setPatternIsSet(true); + } + if (incoming.get(2)) { + struct.tableType = iprot.readString(); + struct.setTableTypeIsSet(true); + } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialized_views_for_rewriting_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialized_views_for_rewriting_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables_by_type_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_tables_by_type_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new 
get_materialized_views_for_rewriting_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_materialized_views_for_rewriting_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_tables_by_type_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_tables_by_type_resultTupleSchemeFactory()); } private List success; // required @@ -58590,13 +59016,13 @@ public String getFieldName() { tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialized_views_for_rewriting_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_tables_by_type_result.class, metaDataMap); } - public get_materialized_views_for_rewriting_result() { + public get_tables_by_type_result() { } - public get_materialized_views_for_rewriting_result( + public get_tables_by_type_result( List success, MetaException o1) { @@ -58608,7 +59034,7 @@ public get_materialized_views_for_rewriting_result( /** * Performs a deep copy on other. */ - public get_materialized_views_for_rewriting_result(get_materialized_views_for_rewriting_result other) { + public get_tables_by_type_result(get_tables_by_type_result other) { if (other.isSetSuccess()) { List __this__success = new ArrayList(other.success); this.success = __this__success; @@ -58618,8 +59044,8 @@ public get_materialized_views_for_rewriting_result(get_materialized_views_for_re } } - public get_materialized_views_for_rewriting_result deepCopy() { - return new get_materialized_views_for_rewriting_result(this); + public get_tables_by_type_result deepCopy() { + return new get_tables_by_type_result(this); } @Override @@ -58741,12 +59167,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_materialized_views_for_rewriting_result) - return this.equals((get_materialized_views_for_rewriting_result)that); + if (that instanceof get_tables_by_type_result) + return this.equals((get_tables_by_type_result)that); return false; } - public boolean equals(get_materialized_views_for_rewriting_result that) { + public boolean equals(get_tables_by_type_result that) { if (that == null) return false; @@ -58789,7 +59215,7 @@ public int hashCode() { } @Override - public int compareTo(get_materialized_views_for_rewriting_result other) { + public int compareTo(get_tables_by_type_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -58833,7 +59259,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_materialized_views_for_rewriting_result("); + StringBuilder sb = new StringBuilder("get_tables_by_type_result("); boolean first = true; sb.append("success:"); @@ -58876,15 +59302,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_materialized_views_for_rewriting_resultStandardSchemeFactory implements SchemeFactory { - public get_materialized_views_for_rewriting_resultStandardScheme getScheme() { - return new get_materialized_views_for_rewriting_resultStandardScheme(); + private static class get_tables_by_type_resultStandardSchemeFactory implements SchemeFactory { + public get_tables_by_type_resultStandardScheme getScheme() { + return new get_tables_by_type_resultStandardScheme(); } } - private static class get_materialized_views_for_rewriting_resultStandardScheme extends StandardScheme { + private static class get_tables_by_type_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -58897,13 +59323,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list972 = iprot.readListBegin(); - struct.success = new ArrayList(_list972.size); - String _elem973; - for (int _i974 = 0; _i974 < _list972.size; ++_i974) + org.apache.thrift.protocol.TList _list988 = iprot.readListBegin(); + struct.success = new ArrayList(_list988.size); + String _elem989; + for (int _i990 = 0; _i990 < _list988.size; ++_i990) { - _elem973 = iprot.readString(); - struct.success.add(_elem973); + _elem989 = iprot.readString(); + struct.success.add(_elem989); } iprot.readListEnd(); } @@ -58930,7 +59356,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -58938,9 +59364,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter975 : struct.success) + for (String _iter991 : struct.success) { - oprot.writeString(_iter975); + oprot.writeString(_iter991); } oprot.writeListEnd(); } @@ -58957,16 +59383,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v } - private static class get_materialized_views_for_rewriting_resultTupleSchemeFactory implements SchemeFactory { - public get_materialized_views_for_rewriting_resultTupleScheme getScheme() { - return new get_materialized_views_for_rewriting_resultTupleScheme(); + private static class get_tables_by_type_resultTupleSchemeFactory implements SchemeFactory { + public 
get_tables_by_type_resultTupleScheme getScheme() { + return new get_tables_by_type_resultTupleScheme(); } } - private static class get_materialized_views_for_rewriting_resultTupleScheme extends TupleScheme { + private static class get_tables_by_type_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -58979,9 +59405,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter976 : struct.success) + for (String _iter992 : struct.success) { - oprot.writeString(_iter976); + oprot.writeString(_iter992); } } } @@ -58991,18 +59417,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list977 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list977.size); - String _elem978; - for (int _i979 = 0; _i979 < _list977.size; ++_i979) + org.apache.thrift.protocol.TList _list993 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list993.size); + String _elem994; + for (int _i995 = 0; _i995 < _list993.size; ++_i995) { - _elem978 = iprot.readString(); - struct.success.add(_elem978); + _elem994 = iprot.readString(); + struct.success.add(_elem994); } } struct.setSuccessIsSet(true); @@ -59017,28 +59443,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_meta_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_meta_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialized_views_for_rewriting_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialized_views_for_rewriting_args"); - private static final org.apache.thrift.protocol.TField DB_PATTERNS_FIELD_DESC = new org.apache.thrift.protocol.TField("db_patterns", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_PATTERNS_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_patterns", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final 
org.apache.thrift.protocol.TField TBL_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_types", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_table_meta_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_table_meta_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_materialized_views_for_rewriting_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_materialized_views_for_rewriting_argsTupleSchemeFactory()); } - private String db_patterns; // required - private String tbl_patterns; // required - private List tbl_types; // required + private String db_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_PATTERNS((short)1, "db_patterns"), - TBL_PATTERNS((short)2, "tbl_patterns"), - TBL_TYPES((short)3, "tbl_types"); + DB_NAME((short)1, "db_name"); private static final Map byName = new HashMap(); @@ -59053,12 +59473,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_PATTERNS - return DB_PATTERNS; - case 2: // TBL_PATTERNS - return TBL_PATTERNS; - case 3: // TBL_TYPES - return TBL_TYPES; + case 1: // DB_NAME + return DB_NAME; default: return null; } @@ -59102,165 +59518,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_PATTERNS, new org.apache.thrift.meta_data.FieldMetaData("db_patterns", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_PATTERNS, new org.apache.thrift.meta_data.FieldMetaData("tbl_patterns", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_TYPES, new org.apache.thrift.meta_data.FieldMetaData("tbl_types", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_meta_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialized_views_for_rewriting_args.class, metaDataMap); } - public get_table_meta_args() { + public get_materialized_views_for_rewriting_args() { } - public get_table_meta_args( - String db_patterns, - String tbl_patterns, - List tbl_types) + public get_materialized_views_for_rewriting_args( + String db_name) { this(); - this.db_patterns = db_patterns; - 
this.tbl_patterns = tbl_patterns; - this.tbl_types = tbl_types; + this.db_name = db_name; } /** * Performs a deep copy on other. */ - public get_table_meta_args(get_table_meta_args other) { - if (other.isSetDb_patterns()) { - this.db_patterns = other.db_patterns; - } - if (other.isSetTbl_patterns()) { - this.tbl_patterns = other.tbl_patterns; - } - if (other.isSetTbl_types()) { - List __this__tbl_types = new ArrayList(other.tbl_types); - this.tbl_types = __this__tbl_types; + public get_materialized_views_for_rewriting_args(get_materialized_views_for_rewriting_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; } } - public get_table_meta_args deepCopy() { - return new get_table_meta_args(this); + public get_materialized_views_for_rewriting_args deepCopy() { + return new get_materialized_views_for_rewriting_args(this); } @Override public void clear() { - this.db_patterns = null; - this.tbl_patterns = null; - this.tbl_types = null; - } - - public String getDb_patterns() { - return this.db_patterns; - } - - public void setDb_patterns(String db_patterns) { - this.db_patterns = db_patterns; - } - - public void unsetDb_patterns() { - this.db_patterns = null; - } - - /** Returns true if field db_patterns is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_patterns() { - return this.db_patterns != null; - } - - public void setDb_patternsIsSet(boolean value) { - if (!value) { - this.db_patterns = null; - } - } - - public String getTbl_patterns() { - return this.tbl_patterns; - } - - public void setTbl_patterns(String tbl_patterns) { - this.tbl_patterns = tbl_patterns; - } - - public void unsetTbl_patterns() { - this.tbl_patterns = null; - } - - /** Returns true if field tbl_patterns is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_patterns() { - return this.tbl_patterns != null; - } - - public void setTbl_patternsIsSet(boolean value) { - if (!value) { - this.tbl_patterns = null; - } - } - - public int getTbl_typesSize() { - return (this.tbl_types == null) ? 0 : this.tbl_types.size(); - } - - public java.util.Iterator getTbl_typesIterator() { - return (this.tbl_types == null) ? 
null : this.tbl_types.iterator(); - } - - public void addToTbl_types(String elem) { - if (this.tbl_types == null) { - this.tbl_types = new ArrayList(); - } - this.tbl_types.add(elem); + this.db_name = null; } - public List getTbl_types() { - return this.tbl_types; + public String getDb_name() { + return this.db_name; } - public void setTbl_types(List tbl_types) { - this.tbl_types = tbl_types; + public void setDb_name(String db_name) { + this.db_name = db_name; } - public void unsetTbl_types() { - this.tbl_types = null; + public void unsetDb_name() { + this.db_name = null; } - /** Returns true if field tbl_types is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_types() { - return this.tbl_types != null; + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; } - public void setTbl_typesIsSet(boolean value) { + public void setDb_nameIsSet(boolean value) { if (!value) { - this.tbl_types = null; + this.db_name = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_PATTERNS: - if (value == null) { - unsetDb_patterns(); - } else { - setDb_patterns((String)value); - } - break; - - case TBL_PATTERNS: - if (value == null) { - unsetTbl_patterns(); - } else { - setTbl_patterns((String)value); - } - break; - - case TBL_TYPES: + case DB_NAME: if (value == null) { - unsetTbl_types(); + unsetDb_name(); } else { - setTbl_types((List)value); + setDb_name((String)value); } break; @@ -59269,14 +59590,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_PATTERNS: - return getDb_patterns(); - - case TBL_PATTERNS: - return getTbl_patterns(); - - case TBL_TYPES: - return getTbl_types(); + case DB_NAME: + return getDb_name(); } throw new IllegalStateException(); @@ -59289,12 +59604,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_PATTERNS: - return isSetDb_patterns(); - case TBL_PATTERNS: - return isSetTbl_patterns(); - case TBL_TYPES: - return isSetTbl_types(); + case DB_NAME: + return isSetDb_name(); } throw new IllegalStateException(); } @@ -59303,39 +59614,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_table_meta_args) - return this.equals((get_table_meta_args)that); + if (that instanceof get_materialized_views_for_rewriting_args) + return this.equals((get_materialized_views_for_rewriting_args)that); return false; } - public boolean equals(get_table_meta_args that) { + public boolean equals(get_materialized_views_for_rewriting_args that) { if (that == null) return false; - boolean this_present_db_patterns = true && this.isSetDb_patterns(); - boolean that_present_db_patterns = true && that.isSetDb_patterns(); - if (this_present_db_patterns || that_present_db_patterns) { - if (!(this_present_db_patterns && that_present_db_patterns)) - return false; - if (!this.db_patterns.equals(that.db_patterns)) - return false; - } - - boolean this_present_tbl_patterns = true && this.isSetTbl_patterns(); - boolean that_present_tbl_patterns = true && that.isSetTbl_patterns(); - if (this_present_tbl_patterns || that_present_tbl_patterns) { - if (!(this_present_tbl_patterns && that_present_tbl_patterns)) - return false; - if (!this.tbl_patterns.equals(that.tbl_patterns)) - return false; - } - - boolean this_present_tbl_types = true && this.isSetTbl_types(); - 
boolean that_present_tbl_types = true && that.isSetTbl_types(); - if (this_present_tbl_types || that_present_tbl_types) { - if (!(this_present_tbl_types && that_present_tbl_types)) + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) return false; - if (!this.tbl_types.equals(that.tbl_types)) + if (!this.db_name.equals(that.db_name)) return false; } @@ -59346,58 +59639,28 @@ public boolean equals(get_table_meta_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_patterns = true && (isSetDb_patterns()); - list.add(present_db_patterns); - if (present_db_patterns) - list.add(db_patterns); - - boolean present_tbl_patterns = true && (isSetTbl_patterns()); - list.add(present_tbl_patterns); - if (present_tbl_patterns) - list.add(tbl_patterns); - - boolean present_tbl_types = true && (isSetTbl_types()); - list.add(present_tbl_types); - if (present_tbl_types) - list.add(tbl_types); + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); return list.hashCode(); } @Override - public int compareTo(get_table_meta_args other) { + public int compareTo(get_materialized_views_for_rewriting_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_patterns()).compareTo(other.isSetDb_patterns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDb_patterns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_patterns, other.db_patterns); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTbl_patterns()).compareTo(other.isSetTbl_patterns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTbl_patterns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_patterns, other.tbl_patterns); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTbl_types()).compareTo(other.isSetTbl_types()); + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetTbl_types()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_types, other.tbl_types); + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); if (lastComparison != 0) { return lastComparison; } @@ -59419,30 +59682,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
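The hunks above collapse get_table_meta_args's three-field presence checks into the single db_name field of get_materialized_views_for_rewriting_args. For readers skimming the generated idiom, here is a minimal hand-written sketch of the isSet/equals/compareTo pattern for one optional String field; SimpleArgs and dbName are illustrative names, not part of the patch, and the bodies are condensed equivalents of the generated presence checks.

    // Condensed sketch of the Thrift-generated "presence + value" comparison
    // idiom seen above. SimpleArgs and dbName are hypothetical names.
    import java.util.Objects;

    public class SimpleArgs implements Comparable<SimpleArgs> {
      private String dbName; // null means "unset", as in the generated code

      public boolean isSetDbName() { return dbName != null; }

      @Override
      public boolean equals(Object that) {
        if (!(that instanceof SimpleArgs)) return false;
        SimpleArgs other = (SimpleArgs) that;
        // Equal when both fields are unset, or both are set with equal values.
        return Objects.equals(this.dbName, other.dbName);
      }

      @Override
      public int hashCode() { return Objects.hash(isSetDbName(), dbName); }

      @Override
      public int compareTo(SimpleArgs other) {
        // Presence is compared first, then the value, mirroring the generated code.
        int last = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
        if (last != 0) return last;
        return isSetDbName() ? this.dbName.compareTo(other.dbName) : 0;
      }
    }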
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_table_meta_args("); + StringBuilder sb = new StringBuilder("get_materialized_views_for_rewriting_args("); boolean first = true; - sb.append("db_patterns:"); - if (this.db_patterns == null) { - sb.append("null"); - } else { - sb.append(this.db_patterns); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_patterns:"); - if (this.tbl_patterns == null) { - sb.append("null"); - } else { - sb.append(this.tbl_patterns); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_types:"); - if (this.tbl_types == null) { + sb.append("db_name:"); + if (this.db_name == null) { sb.append("null"); } else { - sb.append(this.tbl_types); + sb.append(this.db_name); } first = false; sb.append(")"); @@ -59470,15 +59717,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_table_meta_argsStandardSchemeFactory implements SchemeFactory { - public get_table_meta_argsStandardScheme getScheme() { - return new get_table_meta_argsStandardScheme(); + private static class get_materialized_views_for_rewriting_argsStandardSchemeFactory implements SchemeFactory { + public get_materialized_views_for_rewriting_argsStandardScheme getScheme() { + return new get_materialized_views_for_rewriting_argsStandardScheme(); } } - private static class get_table_meta_argsStandardScheme extends StandardScheme { + private static class get_materialized_views_for_rewriting_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -59488,36 +59735,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args break; } switch (schemeField.id) { - case 1: // DB_PATTERNS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_patterns = iprot.readString(); - struct.setDb_patternsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TBL_PATTERNS + case 1: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_patterns = iprot.readString(); - struct.setTbl_patternsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // TBL_TYPES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list980 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list980.size); - String _elem981; - for (int _i982 = 0; _i982 < _list980.size; ++_i982) - { - _elem981 = iprot.readString(); - struct.tbl_types.add(_elem981); - } - iprot.readListEnd(); - } - struct.setTbl_typesIsSet(true); + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -59531,30 +59752,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_patterns != null) { - oprot.writeFieldBegin(DB_PATTERNS_FIELD_DESC); - oprot.writeString(struct.db_patterns); - oprot.writeFieldEnd(); - } - if (struct.tbl_patterns != null) { - oprot.writeFieldBegin(TBL_PATTERNS_FIELD_DESC); - oprot.writeString(struct.tbl_patterns); - oprot.writeFieldEnd(); - } - if (struct.tbl_types != null) { - oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter983 : struct.tbl_types) - { - oprot.writeString(_iter983); - } - oprot.writeListEnd(); - } + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -59563,88 +59767,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg } - private static class get_table_meta_argsTupleSchemeFactory implements SchemeFactory { - public get_table_meta_argsTupleScheme getScheme() { - return new get_table_meta_argsTupleScheme(); + private static class get_materialized_views_for_rewriting_argsTupleSchemeFactory implements SchemeFactory { + public get_materialized_views_for_rewriting_argsTupleScheme getScheme() { + return new get_materialized_views_for_rewriting_argsTupleScheme(); } } - private static class get_table_meta_argsTupleScheme extends TupleScheme { + private static class get_materialized_views_for_rewriting_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_patterns()) { + if (struct.isSetDb_name()) { optionals.set(0); } - if (struct.isSetTbl_patterns()) { - optionals.set(1); - } - if (struct.isSetTbl_types()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetDb_patterns()) { - oprot.writeString(struct.db_patterns); - } - if (struct.isSetTbl_patterns()) { - oprot.writeString(struct.tbl_patterns); - } - if (struct.isSetTbl_types()) { - { - oprot.writeI32(struct.tbl_types.size()); - for (String _iter984 : struct.tbl_types) - { - oprot.writeString(_iter984); - } - } + oprot.writeBitSet(optionals, 1); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.db_patterns = iprot.readString(); - struct.setDb_patternsIsSet(true); - } - if (incoming.get(1)) { - struct.tbl_patterns = iprot.readString(); - struct.setTbl_patternsIsSet(true); - } - if (incoming.get(2)) { - { - org.apache.thrift.protocol.TList _list985 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new 
ArrayList(_list985.size); - String _elem986; - for (int _i987 = 0; _i987 < _list985.size; ++_i987) - { - _elem986 = iprot.readString(); - struct.tbl_types.add(_elem986); - } - } - struct.setTbl_typesIsSet(true); + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_meta_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_meta_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialized_views_for_rewriting_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialized_views_for_rewriting_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_table_meta_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_table_meta_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_materialized_views_for_rewriting_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_materialized_views_for_rewriting_resultTupleSchemeFactory()); } - private List success; // required + private List success; // required private MetaException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -59714,18 +59883,18 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableMeta.class)))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_meta_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialized_views_for_rewriting_result.class, metaDataMap); } - public get_table_meta_result() { + public get_materialized_views_for_rewriting_result() { } - public get_table_meta_result( - List success, + public get_materialized_views_for_rewriting_result( + List success, MetaException o1) { this(); @@ -59736,12 +59905,9 @@ public get_table_meta_result( /** * Performs a deep copy on other. */ - public get_table_meta_result(get_table_meta_result other) { + public get_materialized_views_for_rewriting_result(get_materialized_views_for_rewriting_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (TableMeta other_element : other.success) { - __this__success.add(new TableMeta(other_element)); - } + List __this__success = new ArrayList(other.success); this.success = __this__success; } if (other.isSetO1()) { @@ -59749,8 +59915,8 @@ public get_table_meta_result(get_table_meta_result other) { } } - public get_table_meta_result deepCopy() { - return new get_table_meta_result(this); + public get_materialized_views_for_rewriting_result deepCopy() { + return new get_materialized_views_for_rewriting_result(this); } @Override @@ -59763,22 +59929,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? 
null : this.success.iterator(); } - public void addToSuccess(TableMeta elem) { + public void addToSuccess(String elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -59826,7 +59992,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -59872,12 +60038,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_table_meta_result) - return this.equals((get_table_meta_result)that); + if (that instanceof get_materialized_views_for_rewriting_result) + return this.equals((get_materialized_views_for_rewriting_result)that); return false; } - public boolean equals(get_table_meta_result that) { + public boolean equals(get_materialized_views_for_rewriting_result that) { if (that == null) return false; @@ -59920,7 +60086,7 @@ public int hashCode() { } @Override - public int compareTo(get_table_meta_result other) { + public int compareTo(get_materialized_views_for_rewriting_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -59964,7 +60130,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_table_meta_result("); + StringBuilder sb = new StringBuilder("get_materialized_views_for_rewriting_result("); boolean first = true; sb.append("success:"); @@ -60007,15 +60173,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_table_meta_resultStandardSchemeFactory implements SchemeFactory { - public get_table_meta_resultStandardScheme getScheme() { - return new get_table_meta_resultStandardScheme(); + private static class get_materialized_views_for_rewriting_resultStandardSchemeFactory implements SchemeFactory { + public get_materialized_views_for_rewriting_resultStandardScheme getScheme() { + return new get_materialized_views_for_rewriting_resultStandardScheme(); } } - private static class get_table_meta_resultStandardScheme extends StandardScheme { + private static class get_materialized_views_for_rewriting_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -60028,14 +60194,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list988 = iprot.readListBegin(); - struct.success = new ArrayList(_list988.size); - TableMeta _elem989; - for (int _i990 = 0; _i990 < _list988.size; ++_i990) + org.apache.thrift.protocol.TList _list996 = iprot.readListBegin(); + struct.success = new ArrayList(_list996.size); + String _elem997; + for (int _i998 = 0; _i998 < _list996.size; ++_i998) { - _elem989 
= new TableMeta(); - _elem989.read(iprot); - struct.success.add(_elem989); + _elem997 = iprot.readString(); + struct.success.add(_elem997); } iprot.readListEnd(); } @@ -60062,17 +60227,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter991 : struct.success) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter999 : struct.success) { - _iter991.write(oprot); + oprot.writeString(_iter999); } oprot.writeListEnd(); } @@ -60089,16 +60254,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res } - private static class get_table_meta_resultTupleSchemeFactory implements SchemeFactory { - public get_table_meta_resultTupleScheme getScheme() { - return new get_table_meta_resultTupleScheme(); + private static class get_materialized_views_for_rewriting_resultTupleSchemeFactory implements SchemeFactory { + public get_materialized_views_for_rewriting_resultTupleScheme getScheme() { + return new get_materialized_views_for_rewriting_resultTupleScheme(); } } - private static class get_table_meta_resultTupleScheme extends TupleScheme { + private static class get_materialized_views_for_rewriting_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -60111,9 +60276,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter992 : struct.success) + for (String _iter1000 : struct.success) { - _iter992.write(oprot); + oprot.writeString(_iter1000); } } } @@ -60123,19 +60288,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_views_for_rewriting_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list993 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list993.size); - TableMeta _elem994; - for (int _i995 = 0; _i995 < _list993.size; ++_i995) + org.apache.thrift.protocol.TList _list1001 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list1001.size); + String _elem1002; + for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003) { - _elem994 = new TableMeta(); - _elem994.read(iprot); - struct.success.add(_elem994); + _elem1002 = iprot.readString(); + struct.success.add(_elem1002); } } struct.setSuccessIsSet(true); @@ -60150,22 +60314,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_tables_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_tables_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_meta_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_meta_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_PATTERNS_FIELD_DESC = new org.apache.thrift.protocol.TField("db_patterns", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_PATTERNS_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_patterns", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_types", org.apache.thrift.protocol.TType.LIST, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_all_tables_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_all_tables_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_table_meta_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_table_meta_argsTupleSchemeFactory()); } - private String db_name; // required + private String db_patterns; // required + private String tbl_patterns; // required + private List tbl_types; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"); + DB_PATTERNS((short)1, "db_patterns"), + TBL_PATTERNS((short)2, "tbl_patterns"), + TBL_TYPES((short)3, "tbl_types"); private static final Map byName = new HashMap(); @@ -60180,8 +60350,12 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; + case 1: // DB_PATTERNS + return DB_PATTERNS; + case 2: // TBL_PATTERNS + return TBL_PATTERNS; + case 3: // TBL_TYPES + return TBL_TYPES; default: return null; } @@ -60225,70 +60399,165 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.DB_PATTERNS, new org.apache.thrift.meta_data.FieldMetaData("db_patterns", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_PATTERNS, new org.apache.thrift.meta_data.FieldMetaData("tbl_patterns", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_TYPES, new org.apache.thrift.meta_data.FieldMetaData("tbl_types", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_tables_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_meta_args.class, metaDataMap); } - public get_all_tables_args() { + public get_table_meta_args() { } - public get_all_tables_args( - String db_name) + public get_table_meta_args( + String db_patterns, + String tbl_patterns, + List tbl_types) { this(); - this.db_name = db_name; + this.db_patterns = db_patterns; + this.tbl_patterns = tbl_patterns; + this.tbl_types = tbl_types; } /** * Performs a deep copy on other. 
*/ - public get_all_tables_args(get_all_tables_args other) { - if (other.isSetDb_name()) { - this.db_name = other.db_name; + public get_table_meta_args(get_table_meta_args other) { + if (other.isSetDb_patterns()) { + this.db_patterns = other.db_patterns; + } + if (other.isSetTbl_patterns()) { + this.tbl_patterns = other.tbl_patterns; + } + if (other.isSetTbl_types()) { + List __this__tbl_types = new ArrayList(other.tbl_types); + this.tbl_types = __this__tbl_types; } } - public get_all_tables_args deepCopy() { - return new get_all_tables_args(this); + public get_table_meta_args deepCopy() { + return new get_table_meta_args(this); } @Override public void clear() { - this.db_name = null; + this.db_patterns = null; + this.tbl_patterns = null; + this.tbl_types = null; } - public String getDb_name() { - return this.db_name; + public String getDb_patterns() { + return this.db_patterns; } - public void setDb_name(String db_name) { - this.db_name = db_name; + public void setDb_patterns(String db_patterns) { + this.db_patterns = db_patterns; } - public void unsetDb_name() { - this.db_name = null; + public void unsetDb_patterns() { + this.db_patterns = null; } - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; + /** Returns true if field db_patterns is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_patterns() { + return this.db_patterns != null; } - public void setDb_nameIsSet(boolean value) { + public void setDb_patternsIsSet(boolean value) { if (!value) { - this.db_name = null; + this.db_patterns = null; + } + } + + public String getTbl_patterns() { + return this.tbl_patterns; + } + + public void setTbl_patterns(String tbl_patterns) { + this.tbl_patterns = tbl_patterns; + } + + public void unsetTbl_patterns() { + this.tbl_patterns = null; + } + + /** Returns true if field tbl_patterns is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_patterns() { + return this.tbl_patterns != null; + } + + public void setTbl_patternsIsSet(boolean value) { + if (!value) { + this.tbl_patterns = null; + } + } + + public int getTbl_typesSize() { + return (this.tbl_types == null) ? 0 : this.tbl_types.size(); + } + + public java.util.Iterator getTbl_typesIterator() { + return (this.tbl_types == null) ? 
null : this.tbl_types.iterator(); + } + + public void addToTbl_types(String elem) { + if (this.tbl_types == null) { + this.tbl_types = new ArrayList(); + } + this.tbl_types.add(elem); + } + + public List getTbl_types() { + return this.tbl_types; + } + + public void setTbl_types(List tbl_types) { + this.tbl_types = tbl_types; + } + + public void unsetTbl_types() { + this.tbl_types = null; + } + + /** Returns true if field tbl_types is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_types() { + return this.tbl_types != null; + } + + public void setTbl_typesIsSet(boolean value) { + if (!value) { + this.tbl_types = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: + case DB_PATTERNS: if (value == null) { - unsetDb_name(); + unsetDb_patterns(); } else { - setDb_name((String)value); + setDb_patterns((String)value); + } + break; + + case TBL_PATTERNS: + if (value == null) { + unsetTbl_patterns(); + } else { + setTbl_patterns((String)value); + } + break; + + case TBL_TYPES: + if (value == null) { + unsetTbl_types(); + } else { + setTbl_types((List)value); } break; @@ -60297,8 +60566,14 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); + case DB_PATTERNS: + return getDb_patterns(); + + case TBL_PATTERNS: + return getTbl_patterns(); + + case TBL_TYPES: + return getTbl_types(); } throw new IllegalStateException(); @@ -60311,8 +60586,12 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); + case DB_PATTERNS: + return isSetDb_patterns(); + case TBL_PATTERNS: + return isSetTbl_patterns(); + case TBL_TYPES: + return isSetTbl_types(); } throw new IllegalStateException(); } @@ -60321,21 +60600,39 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_all_tables_args) - return this.equals((get_all_tables_args)that); + if (that instanceof get_table_meta_args) + return this.equals((get_table_meta_args)that); return false; } - public boolean equals(get_all_tables_args that) { + public boolean equals(get_table_meta_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) + boolean this_present_db_patterns = true && this.isSetDb_patterns(); + boolean that_present_db_patterns = true && that.isSetDb_patterns(); + if (this_present_db_patterns || that_present_db_patterns) { + if (!(this_present_db_patterns && that_present_db_patterns)) return false; - if (!this.db_name.equals(that.db_name)) + if (!this.db_patterns.equals(that.db_patterns)) + return false; + } + + boolean this_present_tbl_patterns = true && this.isSetTbl_patterns(); + boolean that_present_tbl_patterns = true && that.isSetTbl_patterns(); + if (this_present_tbl_patterns || that_present_tbl_patterns) { + if (!(this_present_tbl_patterns && that_present_tbl_patterns)) + return false; + if (!this.tbl_patterns.equals(that.tbl_patterns)) + return false; + } + + boolean this_present_tbl_types = true && this.isSetTbl_types(); + boolean that_present_tbl_types = true && that.isSetTbl_types(); + if (this_present_tbl_types || that_present_tbl_types) { + if (!(this_present_tbl_types && that_present_tbl_types)) + return false; + if 
(!this.tbl_types.equals(that.tbl_types)) return false; } @@ -60346,28 +60643,58 @@ public boolean equals(get_all_tables_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); + boolean present_db_patterns = true && (isSetDb_patterns()); + list.add(present_db_patterns); + if (present_db_patterns) + list.add(db_patterns); + + boolean present_tbl_patterns = true && (isSetTbl_patterns()); + list.add(present_tbl_patterns); + if (present_tbl_patterns) + list.add(tbl_patterns); + + boolean present_tbl_types = true && (isSetTbl_types()); + list.add(present_tbl_types); + if (present_tbl_types) + list.add(tbl_types); return list.hashCode(); } @Override - public int compareTo(get_all_tables_args other) { + public int compareTo(get_table_meta_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + lastComparison = Boolean.valueOf(isSetDb_patterns()).compareTo(other.isSetDb_patterns()); if (lastComparison != 0) { return lastComparison; } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (isSetDb_patterns()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_patterns, other.db_patterns); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_patterns()).compareTo(other.isSetTbl_patterns()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_patterns()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_patterns, other.tbl_patterns); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_types()).compareTo(other.isSetTbl_types()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_types()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_types, other.tbl_types); if (lastComparison != 0) { return lastComparison; } @@ -60389,14 +60716,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
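The tuple-scheme hunks below reinstate the BitSet-of-optionals encoding for get_table_meta_args's three fields: a bitset header records which optional fields are present, then only those fields are written in field-id order, with lists prefixed by their size. A compact sketch of that layout follows, under assumed names and with a plain List standing in for the Thrift protocol stream; it is an illustration of the wire layout, not the generated code itself.

    // Minimal sketch of the tuple-scheme encoding shown in the hunks below:
    // a BitSet header says which optional fields follow, then only the set
    // fields are written, in field-id order. Names here are illustrative.
    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    public class TupleEncodingSketch {
      static List<Object> encode(String dbPatterns, String tblPatterns, List<String> tblTypes) {
        BitSet optionals = new BitSet();
        if (dbPatterns != null)  optionals.set(0);
        if (tblPatterns != null) optionals.set(1);
        if (tblTypes != null)    optionals.set(2);

        List<Object> out = new ArrayList<>();
        out.add(optionals);                    // corresponds to oprot.writeBitSet(optionals, 3)
        if (dbPatterns != null)  out.add(dbPatterns);
        if (tblPatterns != null) out.add(tblPatterns);
        if (tblTypes != null) {
          out.add(tblTypes.size());            // list length prefix, as in oprot.writeI32(...)
          out.addAll(tblTypes);                // one entry per oprot.writeString(...)
        }
        return out;
      }
    }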
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_all_tables_args("); + StringBuilder sb = new StringBuilder("get_table_meta_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { + sb.append("db_patterns:"); + if (this.db_patterns == null) { sb.append("null"); } else { - sb.append(this.db_name); + sb.append(this.db_patterns); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_patterns:"); + if (this.tbl_patterns == null) { + sb.append("null"); + } else { + sb.append(this.tbl_patterns); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_types:"); + if (this.tbl_types == null) { + sb.append("null"); + } else { + sb.append(this.tbl_types); } first = false; sb.append(")"); @@ -60424,15 +60767,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_all_tables_argsStandardSchemeFactory implements SchemeFactory { - public get_all_tables_argsStandardScheme getScheme() { - return new get_all_tables_argsStandardScheme(); + private static class get_table_meta_argsStandardSchemeFactory implements SchemeFactory { + public get_table_meta_argsStandardScheme getScheme() { + return new get_table_meta_argsStandardScheme(); } } - private static class get_all_tables_argsStandardScheme extends StandardScheme { + private static class get_table_meta_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -60442,10 +60785,36 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_args break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // DB_PATTERNS if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); + struct.db_patterns = iprot.readString(); + struct.setDb_patternsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_PATTERNS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_patterns = iprot.readString(); + struct.setTbl_patternsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TBL_TYPES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1004.size); + String _elem1005; + for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) + { + _elem1005 = iprot.readString(); + struct.tbl_types.add(_elem1005); + } + iprot.readListEnd(); + } + struct.setTbl_typesIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -60459,13 +60828,30 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_args struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != 
null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); + if (struct.db_patterns != null) { + oprot.writeFieldBegin(DB_PATTERNS_FIELD_DESC); + oprot.writeString(struct.db_patterns); + oprot.writeFieldEnd(); + } + if (struct.tbl_patterns != null) { + oprot.writeFieldBegin(TBL_PATTERNS_FIELD_DESC); + oprot.writeString(struct.tbl_patterns); + oprot.writeFieldEnd(); + } + if (struct.tbl_types != null) { + oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); + for (String _iter1007 : struct.tbl_types) + { + oprot.writeString(_iter1007); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -60474,53 +60860,88 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_arg } - private static class get_all_tables_argsTupleSchemeFactory implements SchemeFactory { - public get_all_tables_argsTupleScheme getScheme() { - return new get_all_tables_argsTupleScheme(); + private static class get_table_meta_argsTupleSchemeFactory implements SchemeFactory { + public get_table_meta_argsTupleScheme getScheme() { + return new get_table_meta_argsTupleScheme(); } } - private static class get_all_tables_argsTupleScheme extends TupleScheme { + private static class get_table_meta_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetDb_patterns()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); + if (struct.isSetTbl_patterns()) { + optionals.set(1); + } + if (struct.isSetTbl_types()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetDb_patterns()) { + oprot.writeString(struct.db_patterns); + } + if (struct.isSetTbl_patterns()) { + oprot.writeString(struct.tbl_patterns); + } + if (struct.isSetTbl_types()) { + { + oprot.writeI32(struct.tbl_types.size()); + for (String _iter1008 : struct.tbl_types) + { + oprot.writeString(_iter1008); + } + } } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); + struct.db_patterns = iprot.readString(); + struct.setDb_patternsIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_patterns = iprot.readString(); + struct.setTbl_patternsIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1009.size); + String _elem1010; + for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) + { + _elem1010 = iprot.readString(); + struct.tbl_types.add(_elem1010); + } + } + struct.setTbl_typesIsSet(true); } } } } - 
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_tables_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_tables_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_meta_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_meta_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_all_tables_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_all_tables_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_table_meta_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_table_meta_resultTupleSchemeFactory()); } - private List success; // required + private List success; // required private MetaException o1; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -60590,18 +61011,18 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableMeta.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_tables_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_meta_result.class, metaDataMap); } - public get_all_tables_result() { + public get_table_meta_result() { } - public get_all_tables_result( - List success, + public get_table_meta_result( + List success, MetaException o1) { this(); @@ -60612,9 +61033,12 @@ public get_all_tables_result( /** * Performs a deep copy on other. 
*/ - public get_all_tables_result(get_all_tables_result other) { + public get_table_meta_result(get_table_meta_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success); + List __this__success = new ArrayList(other.success.size()); + for (TableMeta other_element : other.success) { + __this__success.add(new TableMeta(other_element)); + } this.success = __this__success; } if (other.isSetO1()) { @@ -60622,8 +61046,8 @@ public get_all_tables_result(get_all_tables_result other) { } } - public get_all_tables_result deepCopy() { - return new get_all_tables_result(this); + public get_table_meta_result deepCopy() { + return new get_table_meta_result(this); } @Override @@ -60636,22 +61060,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? null : this.success.iterator(); } - public void addToSuccess(String elem) { + public void addToSuccess(TableMeta elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -60699,7 +61123,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -60745,12 +61169,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_all_tables_result) - return this.equals((get_all_tables_result)that); + if (that instanceof get_table_meta_result) + return this.equals((get_table_meta_result)that); return false; } - public boolean equals(get_all_tables_result that) { + public boolean equals(get_table_meta_result that) { if (that == null) return false; @@ -60793,7 +61217,7 @@ public int hashCode() { } @Override - public int compareTo(get_all_tables_result other) { + public int compareTo(get_table_meta_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -60837,7 +61261,880 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
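The result-struct hunks below swap the element encoding of the success list: get_materialized_views_for_rewriting_result writes STRING elements via writeString, while get_table_meta_result writes STRUCT elements by delegating to each TableMeta's own write method. The contrast, reduced to a sketch with stand-in interfaces (Out and SelfSerializing are illustrative placeholders, not Thrift types):

    // Sketch of the two list-element encodings contrasted in the hunks below.
    import java.util.List;

    public class ListEncodingSketch {
      interface Out { void writeString(String s); }
      interface SelfSerializing { void write(Out out); }

      static void writeStringList(Out out, List<String> values) {
        for (String v : values) out.writeString(v);      // TType.STRING elements
      }

      static void writeStructList(Out out, List<? extends SelfSerializing> values) {
        for (SelfSerializing v : values) v.write(out);   // TType.STRUCT elements
      }
    }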
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_all_tables_result("); + StringBuilder sb = new StringBuilder("get_table_meta_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_table_meta_resultStandardSchemeFactory implements SchemeFactory { + public get_table_meta_resultStandardScheme getScheme() { + return new get_table_meta_resultStandardScheme(); + } + } + + private static class get_table_meta_resultStandardScheme extends StandardScheme<get_table_meta_result> { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); + struct.success = new ArrayList<TableMeta>(_list1012.size); + TableMeta _elem1013; + for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) + { + _elem1013 = new TableMeta(); + _elem1013.read(iprot); + struct.success.add(_elem1013); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (TableMeta _iter1015 : struct.success) + { + _iter1015.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC);
+ struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_table_meta_resultTupleSchemeFactory implements SchemeFactory { + public get_table_meta_resultTupleScheme getScheme() { + return new get_table_meta_resultTupleScheme(); + } + } + + private static class get_table_meta_resultTupleScheme extends TupleScheme<get_table_meta_result> { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (TableMeta _iter1016 : struct.success) + { + _iter1016.write(oprot); + } + } + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list1017 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<TableMeta>(_list1017.size); + TableMeta _elem1018; + for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019) + { + _elem1018 = new TableMeta(); + _elem1018.read(iprot); + struct.success.add(_elem1018); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_tables_args implements org.apache.thrift.TBase<get_all_tables_args, get_all_tables_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_all_tables_args> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_tables_args"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_all_tables_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_all_tables_argsTupleSchemeFactory()); + } + + private String db_name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "db_name"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found.
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_tables_args.class, metaDataMap); + } + + public get_all_tables_args() { + } + + public get_all_tables_args( + String db_name) + { + this(); + this.db_name = db_name; + } + + /** + * Performs a deep copy on other. + */ + public get_all_tables_args(get_all_tables_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + } + + public get_all_tables_args deepCopy() { + return new get_all_tables_args(this); + } + + @Override + public void clear() { + this.db_name = null; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_all_tables_args) + return this.equals((get_all_tables_args)that); + return false; + } + + public boolean equals(get_all_tables_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + 
return false; + } + + return true; + } + + @Override + public int hashCode() { + List<Object> list = new ArrayList<Object>(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + return list.hashCode(); + } + + @Override + public int compareTo(get_all_tables_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_all_tables_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_all_tables_argsStandardSchemeFactory implements SchemeFactory { + public get_all_tables_argsStandardScheme getScheme() { + return new get_all_tables_argsStandardScheme(); + } + } + + private static class get_all_tables_argsStandardScheme extends StandardScheme<get_all_tables_args> { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_args struct) throws org.apache.thrift.TException {
+ struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_all_tables_argsTupleSchemeFactory implements SchemeFactory { + public get_all_tables_argsTupleScheme getScheme() { + return new get_all_tables_argsTupleScheme(); + } + } + + private static class get_all_tables_argsTupleScheme extends TupleScheme<get_all_tables_args> { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDb_name()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_all_tables_result implements org.apache.thrift.TBase<get_all_tables_result, get_all_tables_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_all_tables_result> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_all_tables_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_all_tables_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_all_tables_resultTupleSchemeFactory()); + } + + private List<String> success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_all_tables_result.class, metaDataMap); + } + + public get_all_tables_result() { + } + + public get_all_tables_result( + List<String> success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_all_tables_result(get_all_tables_result other) { + if (other.isSetSuccess()) { + List<String> __this__success = new ArrayList<String>(other.success); + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public get_all_tables_result deepCopy() { + return new get_all_tables_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator<String> getSuccessIterator() { + return (this.success == null) ?
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList<String>(); + } + this.success.add(elem); + } + + public List<String> getSuccess() { + return this.success; + } + + public void setSuccess(List<String> success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List<String>)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_all_tables_result) + return this.equals((get_all_tables_result)that); + return false; + } + + public boolean equals(get_all_tables_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List<Object> list = new ArrayList<Object>(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(get_all_tables_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison =
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_all_tables_result("); boolean first = true; sb.append("success:"); @@ -60901,13 +62198,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list996 = iprot.readListBegin(); - struct.success = new ArrayList<String>(_list996.size); - String _elem997; - for (int _i998 = 0; _i998 < _list996.size; ++_i998) + org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); + struct.success = new ArrayList<String>(_list1020.size); + String _elem1021; + for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) { - _elem997 = iprot.readString(); - struct.success.add(_elem997); + _elem1021 = iprot.readString(); + struct.success.add(_elem1021); } iprot.readListEnd(); } @@ -60942,9 +62239,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter999 : struct.success) + for (String _iter1023 : struct.success) { - oprot.writeString(_iter999); + oprot.writeString(_iter1023); } oprot.writeListEnd(); } @@ -60983,9 +62280,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1000 : struct.success) + for (String _iter1024 : struct.success) { - oprot.writeString(_iter1000); + oprot.writeString(_iter1024); } } } @@ -61000,13 +62297,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1001 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList<String>(_list1001.size); - String _elem1002; - for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003) + org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList<String>(_list1025.size); + String _elem1026; + for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) { - _elem1002 = iprot.readString(); - struct.success.add(_elem1002); + _elem1026 = iprot.readString(); + struct.success.add(_elem1026); } } struct.setSuccessIsSet(true); @@ -62459,13 +63756,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: //
TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); - struct.tbl_names = new ArrayList<String>(_list1004.size); - String _elem1005; - for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) + org.apache.thrift.protocol.TList _list1028 = iprot.readListBegin(); + struct.tbl_names = new ArrayList<String>(_list1028.size); + String _elem1029; + for (int _i1030 = 0; _i1030 < _list1028.size; ++_i1030) { - _elem1005 = iprot.readString(); - struct.tbl_names.add(_elem1005); + _elem1029 = iprot.readString(); + struct.tbl_names.add(_elem1029); } iprot.readListEnd(); } @@ -62496,9 +63793,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1007 : struct.tbl_names) + for (String _iter1031 : struct.tbl_names) { - oprot.writeString(_iter1007); + oprot.writeString(_iter1031); } oprot.writeListEnd(); } @@ -62535,9 +63832,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1008 : struct.tbl_names) + for (String _iter1032 : struct.tbl_names) { - oprot.writeString(_iter1008); + oprot.writeString(_iter1032); } } } @@ -62553,13 +63850,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList<String>(_list1009.size); - String _elem1010; - for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) + org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList<String>(_list1033.size); + String _elem1034; + for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035) { - _elem1010 = iprot.readString(); - struct.tbl_names.add(_elem1010); + _elem1034 = iprot.readString(); + struct.tbl_names.add(_elem1034); } } struct.setTbl_namesIsSet(true); @@ -62884,14 +64181,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); - struct.success = new ArrayList<Table>(_list1012.size); - Table _elem1013; - for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) + org.apache.thrift.protocol.TList _list1036 = iprot.readListBegin(); + struct.success = new ArrayList<Table>(_list1036.size); + Table _elem1037; + for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) { - _elem1013 = new Table(); - _elem1013.read(iprot); - struct.success.add(_elem1013); + _elem1037 = new Table(); + _elem1037.read(iprot); + struct.success.add(_elem1037); } iprot.readListEnd(); } @@ -62917,9 +64214,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1015 : struct.success) + for (Table _iter1039 : struct.success) { - _iter1015.write(oprot); + _iter1039.write(oprot); } oprot.writeListEnd(); } @@ -62950,9 +64247,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1016 : struct.success) + for (Table _iter1040 : struct.success) { - _iter1016.write(oprot); + _iter1040.write(oprot); } } } @@ -62964,14 +64261,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1017 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList<Table>(_list1017.size); - Table _elem1018; - for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019) + org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>(_list1041.size); + Table _elem1042; + for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) { - _elem1018 = new Table(); - _elem1018.read(iprot); - struct.success.add(_elem1018); + _elem1042 = new Table(); + _elem1042.read(iprot); + struct.success.add(_elem1042); } } struct.setSuccessIsSet(true); @@ -65364,13 +66661,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); - struct.tbl_names = new ArrayList<String>(_list1020.size); - String _elem1021; - for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) + org.apache.thrift.protocol.TList _list1044 = iprot.readListBegin(); + struct.tbl_names = new ArrayList<String>(_list1044.size); + String _elem1045; + for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046) { - _elem1021 = iprot.readString(); - struct.tbl_names.add(_elem1021); + _elem1045 = iprot.readString(); + struct.tbl_names.add(_elem1045); } iprot.readListEnd(); } @@ -65401,9 +66698,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1023 : struct.tbl_names) + for (String _iter1047 : struct.tbl_names) { - oprot.writeString(_iter1023); + oprot.writeString(_iter1047); } oprot.writeListEnd(); } @@ -65440,9 +66737,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1024 : struct.tbl_names) + for (String _iter1048 : struct.tbl_names) { - oprot.writeString(_iter1024); + oprot.writeString(_iter1048); } } } @@ -65458,13 +66755,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList<String>(_list1025.size); - String _elem1026; - for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) + org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList<String>(_list1049.size); + String _elem1050; + for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051) { - _elem1026 = iprot.readString(); - struct.tbl_names.add(_elem1026); + _elem1050 = iprot.readString(); + struct.tbl_names.add(_elem1050); } } struct.setTbl_namesIsSet(true); @@ -66037,16 +67334,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1028 = iprot.readMapBegin(); - struct.success = new HashMap<String,Materialization>(2*_map1028.size); - String _key1029; - Materialization _val1030; - for (int _i1031 = 0; _i1031 < _map1028.size; ++_i1031) + org.apache.thrift.protocol.TMap _map1052 = iprot.readMapBegin(); + struct.success = new HashMap<String,Materialization>(2*_map1052.size); + String _key1053; + Materialization _val1054; + for (int _i1055 = 0; _i1055 < _map1052.size; ++_i1055) { - _key1029 = iprot.readString(); - _val1030 = new Materialization(); - _val1030.read(iprot); - struct.success.put(_key1029, _val1030); + _key1053 = iprot.readString(); + _val1054 = new Materialization(); +
_val1054.read(iprot); + struct.success.put(_key1053, _val1054); } iprot.readMapEnd(); } @@ -66099,10 +67396,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry<String, Materialization> _iter1032 : struct.success.entrySet()) + for (Map.Entry<String, Materialization> _iter1056 : struct.success.entrySet()) { - oprot.writeString(_iter1032.getKey()); - _iter1032.getValue().write(oprot); + oprot.writeString(_iter1056.getKey()); + _iter1056.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -66157,10 +67454,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry<String, Materialization> _iter1033 : struct.success.entrySet()) + for (Map.Entry<String, Materialization> _iter1057 : struct.success.entrySet()) { - oprot.writeString(_iter1033.getKey()); - _iter1033.getValue().write(oprot); + oprot.writeString(_iter1057.getKey()); + _iter1057.getValue().write(oprot); } } } @@ -66181,16 +67478,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1034 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap<String,Materialization>(2*_map1034.size); - String _key1035; - Materialization _val1036; - for (int _i1037 = 0; _i1037 < _map1034.size; ++_i1037) + org.apache.thrift.protocol.TMap _map1058 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap<String,Materialization>(2*_map1058.size); + String _key1059; + Materialization _val1060; + for (int _i1061 = 0; _i1061 < _map1058.size; ++_i1061) { - _key1035 = iprot.readString(); - _val1036 = new Materialization(); - _val1036.read(iprot); - struct.success.put(_key1035, _val1036); + _key1059 = iprot.readString(); + _val1060 = new Materialization(); + _val1060.read(iprot); + struct.success.put(_key1059, _val1060); } } struct.setSuccessIsSet(true); @@ -68479,13 +69776,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin(); - struct.success = new ArrayList<String>(_list1038.size); - String _elem1039; - for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040) + org.apache.thrift.protocol.TList _list1062 = iprot.readListBegin(); + struct.success = new ArrayList<String>(_list1062.size); + String _elem1063; + for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064) { - _elem1039 = iprot.readString(); - struct.success.add(_elem1039); + _elem1063 = iprot.readString(); + struct.success.add(_elem1063); } iprot.readListEnd(); } @@ -68538,9 +69835,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1041 : struct.success) + for (String _iter1065 : struct.success) { - oprot.writeString(_iter1041); + oprot.writeString(_iter1065); } oprot.writeListEnd(); } @@ -68595,9 +69892,9 @@ public void
write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1042 : struct.success) + for (String _iter1066 : struct.success) { - oprot.writeString(_iter1042); + oprot.writeString(_iter1066); } } } @@ -68618,13 +69915,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1043 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList<String>(_list1043.size); - String _elem1044; - for (int _i1045 = 0; _i1045 < _list1043.size; ++_i1045) + org.apache.thrift.protocol.TList _list1067 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList<String>(_list1067.size); + String _elem1068; + for (int _i1069 = 0; _i1069 < _list1067.size; ++_i1069) { - _elem1044 = iprot.readString(); - struct.success.add(_elem1044); + _elem1068 = iprot.readString(); + struct.success.add(_elem1068); } } struct.setSuccessIsSet(true); @@ -74483,14 +75780,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1046 = iprot.readListBegin(); - struct.new_parts = new ArrayList<Partition>(_list1046.size); - Partition _elem1047; - for (int _i1048 = 0; _i1048 < _list1046.size; ++_i1048) + org.apache.thrift.protocol.TList _list1070 = iprot.readListBegin(); + struct.new_parts = new ArrayList<Partition>(_list1070.size); + Partition _elem1071; + for (int _i1072 = 0; _i1072 < _list1070.size; ++_i1072) { - _elem1047 = new Partition(); - _elem1047.read(iprot); - struct.new_parts.add(_elem1047); + _elem1071 = new Partition(); + _elem1071.read(iprot); + struct.new_parts.add(_elem1071); } iprot.readListEnd(); } @@ -74516,9 +75813,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1049 : struct.new_parts) + for (Partition _iter1073 : struct.new_parts) { - _iter1049.write(oprot); + _iter1073.write(oprot); } oprot.writeListEnd(); } @@ -74549,9 +75846,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1050 : struct.new_parts) + for (Partition _iter1074 : struct.new_parts) { - _iter1050.write(oprot); + _iter1074.write(oprot); } } } @@ -74563,14 +75860,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1051 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList<Partition>(_list1051.size); - Partition _elem1052; - for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053) + org.apache.thrift.protocol.TList _list1075 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList<Partition>(_list1075.size); + Partition _elem1076; + for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077) { - _elem1052 = new Partition(); - _elem1052.read(iprot); -
struct.new_parts.add(_elem1052); + _elem1076 = new Partition(); + _elem1076.read(iprot); + struct.new_parts.add(_elem1076); } } struct.setNew_partsIsSet(true); @@ -75571,14 +76868,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1054 = iprot.readListBegin(); - struct.new_parts = new ArrayList<PartitionSpec>(_list1054.size); - PartitionSpec _elem1055; - for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056) + org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin(); + struct.new_parts = new ArrayList<PartitionSpec>(_list1078.size); + PartitionSpec _elem1079; + for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080) { - _elem1055 = new PartitionSpec(); - _elem1055.read(iprot); - struct.new_parts.add(_elem1055); + _elem1079 = new PartitionSpec(); + _elem1079.read(iprot); + struct.new_parts.add(_elem1079); } iprot.readListEnd(); } @@ -75604,9 +76901,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1057 : struct.new_parts) + for (PartitionSpec _iter1081 : struct.new_parts) { - _iter1057.write(oprot); + _iter1081.write(oprot); } oprot.writeListEnd(); } @@ -75637,9 +76934,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1058 : struct.new_parts) + for (PartitionSpec _iter1082 : struct.new_parts) { - _iter1058.write(oprot); + _iter1082.write(oprot); } } } @@ -75651,14 +76948,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList<PartitionSpec>(_list1059.size); - PartitionSpec _elem1060; - for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061) + org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList<PartitionSpec>(_list1083.size); + PartitionSpec _elem1084; + for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085) { - _elem1060 = new PartitionSpec(); - _elem1060.read(iprot); - struct.new_parts.add(_elem1060); + _elem1084 = new PartitionSpec(); + _elem1084.read(iprot); + struct.new_parts.add(_elem1084); } } struct.setNew_partsIsSet(true); @@ -76834,13 +78131,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1062 = iprot.readListBegin(); - struct.part_vals = new ArrayList<String>(_list1062.size); - String _elem1063; - for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064) + org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin(); + struct.part_vals = new ArrayList<String>(_list1086.size); + String _elem1087; + for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088) { - _elem1063 = iprot.readString(); - struct.part_vals.add(_elem1063); + _elem1087 = iprot.readString(); + struct.part_vals.add(_elem1087); } iprot.readListEnd(); } @@ -76876,9 +78173,9 @@ public void
write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1065 : struct.part_vals) + for (String _iter1089 : struct.part_vals) { - oprot.writeString(_iter1065); + oprot.writeString(_iter1089); } oprot.writeListEnd(); } @@ -76921,9 +78218,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1066 : struct.part_vals) + for (String _iter1090 : struct.part_vals) { - oprot.writeString(_iter1066); + oprot.writeString(_iter1090); } } } @@ -76943,13 +78240,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1067 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList<String>(_list1067.size); - String _elem1068; - for (int _i1069 = 0; _i1069 < _list1067.size; ++_i1069) + org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList<String>(_list1091.size); + String _elem1092; + for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) { - _elem1068 = iprot.readString(); - struct.part_vals.add(_elem1068); + _elem1092 = iprot.readString(); + struct.part_vals.add(_elem1092); } } struct.setPart_valsIsSet(true); @@ -79258,13 +80555,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1070 = iprot.readListBegin(); - struct.part_vals = new ArrayList<String>(_list1070.size); - String _elem1071; - for (int _i1072 = 0; _i1072 < _list1070.size; ++_i1072) + org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin(); + struct.part_vals = new ArrayList<String>(_list1094.size); + String _elem1095; + for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) { - _elem1071 = iprot.readString(); - struct.part_vals.add(_elem1071); + _elem1095 = iprot.readString(); + struct.part_vals.add(_elem1095); } iprot.readListEnd(); } @@ -79309,9 +80606,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1073 : struct.part_vals) + for (String _iter1097 : struct.part_vals) { - oprot.writeString(_iter1073); + oprot.writeString(_iter1097); } oprot.writeListEnd(); } @@ -79362,9 +80659,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1074 : struct.part_vals) + for (String _iter1098 : struct.part_vals) { - oprot.writeString(_iter1074); + oprot.writeString(_iter1098); } } } @@ -79387,13 +80684,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1075 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList<String>(_list1075.size); - String _elem1076; - for (int _i1077 = 0; _i1077 <
_list1075.size; ++_i1077) + org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList<String>(_list1099.size); + String _elem1100; + for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101) { - _elem1076 = iprot.readString(); - struct.part_vals.add(_elem1076); + _elem1100 = iprot.readString(); + struct.part_vals.add(_elem1100); } } struct.setPart_valsIsSet(true); @@ -83263,13 +84560,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin(); - struct.part_vals = new ArrayList<String>(_list1078.size); - String _elem1079; - for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080) + org.apache.thrift.protocol.TList _list1102 = iprot.readListBegin(); + struct.part_vals = new ArrayList<String>(_list1102.size); + String _elem1103; + for (int _i1104 = 0; _i1104 < _list1102.size; ++_i1104) { - _elem1079 = iprot.readString(); - struct.part_vals.add(_elem1079); + _elem1103 = iprot.readString(); + struct.part_vals.add(_elem1103); } iprot.readListEnd(); } @@ -83313,9 +84610,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1081 : struct.part_vals) + for (String _iter1105 : struct.part_vals) { - oprot.writeString(_iter1081); + oprot.writeString(_iter1105); } oprot.writeListEnd(); } @@ -83364,9 +84661,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1082 : struct.part_vals) + for (String _iter1106 : struct.part_vals) { - oprot.writeString(_iter1082); + oprot.writeString(_iter1106); } } } @@ -83389,13 +84686,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList<String>(_list1083.size); - String _elem1084; - for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085) + org.apache.thrift.protocol.TList _list1107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList<String>(_list1107.size); + String _elem1108; + for (int _i1109 = 0; _i1109 < _list1107.size; ++_i1109) { - _elem1084 = iprot.readString(); - struct.part_vals.add(_elem1084); + _elem1108 = iprot.readString(); + struct.part_vals.add(_elem1108); } } struct.setPart_valsIsSet(true); @@ -84634,13 +85931,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin(); - struct.part_vals = new ArrayList<String>(_list1086.size); - String _elem1087; - for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088) + org.apache.thrift.protocol.TList _list1110 = iprot.readListBegin(); + struct.part_vals = new ArrayList<String>(_list1110.size); + String _elem1111; + for (int _i1112 = 0; _i1112 < _list1110.size; ++_i1112) { - _elem1087 = iprot.readString(); - struct.part_vals.add(_elem1087); +
_elem1111 = iprot.readString(); + struct.part_vals.add(_elem1111); } iprot.readListEnd(); } @@ -84693,9 +85990,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1089 : struct.part_vals) + for (String _iter1113 : struct.part_vals) { - oprot.writeString(_iter1089); + oprot.writeString(_iter1113); } oprot.writeListEnd(); } @@ -84752,9 +86049,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1090 : struct.part_vals) + for (String _iter1114 : struct.part_vals) { - oprot.writeString(_iter1090); + oprot.writeString(_iter1114); } } } @@ -84780,13 +86077,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList<String>(_list1091.size); - String _elem1092; - for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) + org.apache.thrift.protocol.TList _list1115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList<String>(_list1115.size); + String _elem1116; + for (int _i1117 = 0; _i1117 < _list1115.size; ++_i1117) { - _elem1092 = iprot.readString(); - struct.part_vals.add(_elem1092); + _elem1116 = iprot.readString(); + struct.part_vals.add(_elem1116); } } struct.setPart_valsIsSet(true); @@ -89388,13 +90685,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin(); - struct.part_vals = new ArrayList<String>(_list1094.size); - String _elem1095; - for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) + org.apache.thrift.protocol.TList _list1118 = iprot.readListBegin(); + struct.part_vals = new ArrayList<String>(_list1118.size); + String _elem1119; + for (int _i1120 = 0; _i1120 < _list1118.size; ++_i1120) { - _elem1095 = iprot.readString(); - struct.part_vals.add(_elem1095); + _elem1119 = iprot.readString(); + struct.part_vals.add(_elem1119); } iprot.readListEnd(); } @@ -89430,9 +90727,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1097 : struct.part_vals) + for (String _iter1121 : struct.part_vals) { - oprot.writeString(_iter1097); + oprot.writeString(_iter1121); } oprot.writeListEnd(); } @@ -89475,9 +90772,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1098 : struct.part_vals) + for (String _iter1122 : struct.part_vals) { - oprot.writeString(_iter1098); + oprot.writeString(_iter1122); } } } @@ -89497,13 +90794,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1099 = new
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList<String>(_list1099.size); - String _elem1100; - for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101) + org.apache.thrift.protocol.TList _list1123 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList<String>(_list1123.size); + String _elem1124; + for (int _i1125 = 0; _i1125 < _list1123.size; ++_i1125) { - _elem1100 = iprot.readString(); - struct.part_vals.add(_elem1100); + _elem1124 = iprot.readString(); + struct.part_vals.add(_elem1124); } } struct.setPart_valsIsSet(true); @@ -90721,15 +92018,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1102 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap<String,String>(2*_map1102.size); - String _key1103; - String _val1104; - for (int _i1105 = 0; _i1105 < _map1102.size; ++_i1105) + org.apache.thrift.protocol.TMap _map1126 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap<String,String>(2*_map1126.size); + String _key1127; + String _val1128; + for (int _i1129 = 0; _i1129 < _map1126.size; ++_i1129) { - _key1103 = iprot.readString(); - _val1104 = iprot.readString(); - struct.partitionSpecs.put(_key1103, _val1104); + _key1127 = iprot.readString(); + _val1128 = iprot.readString(); + struct.partitionSpecs.put(_key1127, _val1128); } iprot.readMapEnd(); } @@ -90787,10 +92084,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry<String, String> _iter1106 : struct.partitionSpecs.entrySet()) + for (Map.Entry<String, String> _iter1130 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1106.getKey()); - oprot.writeString(_iter1106.getValue()); + oprot.writeString(_iter1130.getKey()); + oprot.writeString(_iter1130.getValue()); } oprot.writeMapEnd(); } @@ -90853,10 +92150,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry<String, String> _iter1107 : struct.partitionSpecs.entrySet()) + for (Map.Entry<String, String> _iter1131 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1107.getKey()); - oprot.writeString(_iter1107.getValue()); + oprot.writeString(_iter1131.getKey()); + oprot.writeString(_iter1131.getValue()); } } } @@ -90880,15 +92177,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1108 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap<String,String>(2*_map1108.size); - String _key1109; - String _val1110; - for (int _i1111 = 0; _i1111 < _map1108.size; ++_i1111) + org.apache.thrift.protocol.TMap _map1132 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap<String,String>(2*_map1132.size); + String _key1133; + String _val1134; + for (int _i1135 = 0; _i1135 < _map1132.size; ++_i1135) { - _key1109
= iprot.readString(); - _val1110 = iprot.readString(); - struct.partitionSpecs.put(_key1109, _val1110); + _key1133 = iprot.readString(); + _val1134 = iprot.readString(); + struct.partitionSpecs.put(_key1133, _val1134); } } struct.setPartitionSpecsIsSet(true); @@ -92334,15 +93631,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1112 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1112.size); - String _key1113; - String _val1114; - for (int _i1115 = 0; _i1115 < _map1112.size; ++_i1115) + org.apache.thrift.protocol.TMap _map1136 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1136.size); + String _key1137; + String _val1138; + for (int _i1139 = 0; _i1139 < _map1136.size; ++_i1139) { - _key1113 = iprot.readString(); - _val1114 = iprot.readString(); - struct.partitionSpecs.put(_key1113, _val1114); + _key1137 = iprot.readString(); + _val1138 = iprot.readString(); + struct.partitionSpecs.put(_key1137, _val1138); } iprot.readMapEnd(); } @@ -92400,10 +93697,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1116 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1140 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1116.getKey()); - oprot.writeString(_iter1116.getValue()); + oprot.writeString(_iter1140.getKey()); + oprot.writeString(_iter1140.getValue()); } oprot.writeMapEnd(); } @@ -92466,10 +93763,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1117 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1141 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1117.getKey()); - oprot.writeString(_iter1117.getValue()); + oprot.writeString(_iter1141.getKey()); + oprot.writeString(_iter1141.getValue()); } } } @@ -92493,15 +93790,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1118 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1118.size); - String _key1119; - String _val1120; - for (int _i1121 = 0; _i1121 < _map1118.size; ++_i1121) + org.apache.thrift.protocol.TMap _map1142 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1142.size); + String _key1143; + String _val1144; + for (int _i1145 = 0; _i1145 < _map1142.size; ++_i1145) { - _key1119 = iprot.readString(); - _val1120 = iprot.readString(); - struct.partitionSpecs.put(_key1119, _val1120); + _key1143 = iprot.readString(); + _val1144 = iprot.readString(); + struct.partitionSpecs.put(_key1143, _val1144); } } struct.setPartitionSpecsIsSet(true); @@ -93166,14 +94463,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin(); - struct.success = new ArrayList(_list1122.size); - Partition _elem1123; - for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124) + org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); + struct.success = new ArrayList(_list1146.size); + Partition _elem1147; + for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) { - _elem1123 = new Partition(); - _elem1123.read(iprot); - struct.success.add(_elem1123); + _elem1147 = new Partition(); + _elem1147.read(iprot); + struct.success.add(_elem1147); } iprot.readListEnd(); } @@ -93235,9 +94532,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1125 : struct.success) + for (Partition _iter1149 : struct.success) { - _iter1125.write(oprot); + _iter1149.write(oprot); } oprot.writeListEnd(); } @@ -93300,9 +94597,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1126 : struct.success) + for (Partition _iter1150 : struct.success) { - _iter1126.write(oprot); + _iter1150.write(oprot); } } } @@ -93326,14 +94623,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1127.size); - Partition _elem1128; - for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129) + org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1151.size); + Partition _elem1152; + for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) { - _elem1128 = new Partition(); - _elem1128.read(iprot); - struct.success.add(_elem1128); + _elem1152 = new Partition(); + _elem1152.read(iprot); + struct.success.add(_elem1152); } } struct.setSuccessIsSet(true); @@ -94032,13 +95329,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1130 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1130.size); - String _elem1131; - for (int _i1132 = 0; _i1132 < _list1130.size; ++_i1132) + org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1154.size); + String _elem1155; + for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) { - _elem1131 = iprot.readString(); - struct.part_vals.add(_elem1131); + _elem1155 = iprot.readString(); + struct.part_vals.add(_elem1155); } iprot.readListEnd(); } @@ -94058,13 +95355,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1133 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1133.size); - String _elem1134; - for (int _i1135 = 0; _i1135 < _list1133.size; ++_i1135) + org.apache.thrift.protocol.TList 
_list1157 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1157.size); + String _elem1158; + for (int _i1159 = 0; _i1159 < _list1157.size; ++_i1159) { - _elem1134 = iprot.readString(); - struct.group_names.add(_elem1134); + _elem1158 = iprot.readString(); + struct.group_names.add(_elem1158); } iprot.readListEnd(); } @@ -94100,9 +95397,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1136 : struct.part_vals) + for (String _iter1160 : struct.part_vals) { - oprot.writeString(_iter1136); + oprot.writeString(_iter1160); } oprot.writeListEnd(); } @@ -94117,9 +95414,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1137 : struct.group_names) + for (String _iter1161 : struct.group_names) { - oprot.writeString(_iter1137); + oprot.writeString(_iter1161); } oprot.writeListEnd(); } @@ -94168,9 +95465,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1138 : struct.part_vals) + for (String _iter1162 : struct.part_vals) { - oprot.writeString(_iter1138); + oprot.writeString(_iter1162); } } } @@ -94180,9 +95477,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1139 : struct.group_names) + for (String _iter1163 : struct.group_names) { - oprot.writeString(_iter1139); + oprot.writeString(_iter1163); } } } @@ -94202,13 +95499,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1140 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1140.size); - String _elem1141; - for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) + org.apache.thrift.protocol.TList _list1164 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1164.size); + String _elem1165; + for (int _i1166 = 0; _i1166 < _list1164.size; ++_i1166) { - _elem1141 = iprot.readString(); - struct.part_vals.add(_elem1141); + _elem1165 = iprot.readString(); + struct.part_vals.add(_elem1165); } } struct.setPart_valsIsSet(true); @@ -94219,13 +95516,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1143.size); - String _elem1144; - for (int _i1145 = 0; _i1145 < _list1143.size; ++_i1145) + org.apache.thrift.protocol.TList _list1167 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1167.size); + String _elem1168; + for (int _i1169 = 0; _i1169 < _list1167.size; ++_i1169) { - _elem1144 = iprot.readString(); - 
struct.group_names.add(_elem1144); + _elem1168 = iprot.readString(); + struct.group_names.add(_elem1168); } } struct.setGroup_namesIsSet(true); @@ -96994,14 +98291,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1146 = iprot.readListBegin(); - struct.success = new ArrayList(_list1146.size); - Partition _elem1147; - for (int _i1148 = 0; _i1148 < _list1146.size; ++_i1148) + org.apache.thrift.protocol.TList _list1170 = iprot.readListBegin(); + struct.success = new ArrayList(_list1170.size); + Partition _elem1171; + for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) { - _elem1147 = new Partition(); - _elem1147.read(iprot); - struct.success.add(_elem1147); + _elem1171 = new Partition(); + _elem1171.read(iprot); + struct.success.add(_elem1171); } iprot.readListEnd(); } @@ -97045,9 +98342,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1149 : struct.success) + for (Partition _iter1173 : struct.success) { - _iter1149.write(oprot); + _iter1173.write(oprot); } oprot.writeListEnd(); } @@ -97094,9 +98391,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1150 : struct.success) + for (Partition _iter1174 : struct.success) { - _iter1150.write(oprot); + _iter1174.write(oprot); } } } @@ -97114,14 +98411,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1151.size); - Partition _elem1152; - for (int _i1153 = 0; _i1153 < _list1151.size; ++_i1153) + org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1175.size); + Partition _elem1176; + for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) { - _elem1152 = new Partition(); - _elem1152.read(iprot); - struct.success.add(_elem1152); + _elem1176 = new Partition(); + _elem1176.read(iprot); + struct.success.add(_elem1176); } } struct.setSuccessIsSet(true); @@ -97811,13 +99108,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1154 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1154.size); - String _elem1155; - for (int _i1156 = 0; _i1156 < _list1154.size; ++_i1156) + org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1178.size); + String _elem1179; + for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) { - _elem1155 = iprot.readString(); - struct.group_names.add(_elem1155); + _elem1179 = iprot.readString(); + struct.group_names.add(_elem1179); } iprot.readListEnd(); } @@ -97861,9 +99158,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit 
oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1157 : struct.group_names) + for (String _iter1181 : struct.group_names) { - oprot.writeString(_iter1157); + oprot.writeString(_iter1181); } oprot.writeListEnd(); } @@ -97918,9 +99215,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1158 : struct.group_names) + for (String _iter1182 : struct.group_names) { - oprot.writeString(_iter1158); + oprot.writeString(_iter1182); } } } @@ -97948,13 +99245,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1159.size); - String _elem1160; - for (int _i1161 = 0; _i1161 < _list1159.size; ++_i1161) + org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1183.size); + String _elem1184; + for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) { - _elem1160 = iprot.readString(); - struct.group_names.add(_elem1160); + _elem1184 = iprot.readString(); + struct.group_names.add(_elem1184); } } struct.setGroup_namesIsSet(true); @@ -98441,14 +99738,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1162 = iprot.readListBegin(); - struct.success = new ArrayList(_list1162.size); - Partition _elem1163; - for (int _i1164 = 0; _i1164 < _list1162.size; ++_i1164) + org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); + struct.success = new ArrayList(_list1186.size); + Partition _elem1187; + for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) { - _elem1163 = new Partition(); - _elem1163.read(iprot); - struct.success.add(_elem1163); + _elem1187 = new Partition(); + _elem1187.read(iprot); + struct.success.add(_elem1187); } iprot.readListEnd(); } @@ -98492,9 +99789,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1165 : struct.success) + for (Partition _iter1189 : struct.success) { - _iter1165.write(oprot); + _iter1189.write(oprot); } oprot.writeListEnd(); } @@ -98541,9 +99838,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1166 : struct.success) + for (Partition _iter1190 : struct.success) { - _iter1166.write(oprot); + _iter1190.write(oprot); } } } @@ -98561,14 +99858,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1167 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1167.size); - Partition _elem1168; - for (int _i1169 = 0; 
_i1169 < _list1167.size; ++_i1169) + org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1191.size); + Partition _elem1192; + for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) { - _elem1168 = new Partition(); - _elem1168.read(iprot); - struct.success.add(_elem1168); + _elem1192 = new Partition(); + _elem1192.read(iprot); + struct.success.add(_elem1192); } } struct.setSuccessIsSet(true); @@ -99631,14 +100928,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1170 = iprot.readListBegin(); - struct.success = new ArrayList(_list1170.size); - PartitionSpec _elem1171; - for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) + org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); + struct.success = new ArrayList(_list1194.size); + PartitionSpec _elem1195; + for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) { - _elem1171 = new PartitionSpec(); - _elem1171.read(iprot); - struct.success.add(_elem1171); + _elem1195 = new PartitionSpec(); + _elem1195.read(iprot); + struct.success.add(_elem1195); } iprot.readListEnd(); } @@ -99682,9 +100979,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1173 : struct.success) + for (PartitionSpec _iter1197 : struct.success) { - _iter1173.write(oprot); + _iter1197.write(oprot); } oprot.writeListEnd(); } @@ -99731,9 +101028,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1174 : struct.success) + for (PartitionSpec _iter1198 : struct.success) { - _iter1174.write(oprot); + _iter1198.write(oprot); } } } @@ -99751,14 +101048,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1175 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1175.size); - PartitionSpec _elem1176; - for (int _i1177 = 0; _i1177 < _list1175.size; ++_i1177) + org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1199.size); + PartitionSpec _elem1200; + for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) { - _elem1176 = new PartitionSpec(); - _elem1176.read(iprot); - struct.success.add(_elem1176); + _elem1200 = new PartitionSpec(); + _elem1200.read(iprot); + struct.success.add(_elem1200); } } struct.setSuccessIsSet(true); @@ -100818,13 +102115,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1178 = iprot.readListBegin(); - struct.success = new ArrayList(_list1178.size); - String _elem1179; - for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) + org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list1202.size); + String _elem1203; + for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) { - _elem1179 = iprot.readString(); - struct.success.add(_elem1179); + _elem1203 = iprot.readString(); + struct.success.add(_elem1203); } iprot.readListEnd(); } @@ -100868,9 +102165,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1181 : struct.success) + for (String _iter1205 : struct.success) { - oprot.writeString(_iter1181); + oprot.writeString(_iter1205); } oprot.writeListEnd(); } @@ -100917,9 +102214,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1182 : struct.success) + for (String _iter1206 : struct.success) { - oprot.writeString(_iter1182); + oprot.writeString(_iter1206); } } } @@ -100937,13 +102234,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1183.size); - String _elem1184; - for (int _i1185 = 0; _i1185 < _list1183.size; ++_i1185) + org.apache.thrift.protocol.TList _list1207 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1207.size); + String _elem1208; + for (int _i1209 = 0; _i1209 < _list1207.size; ++_i1209) { - _elem1184 = iprot.readString(); - struct.success.add(_elem1184); + _elem1208 = iprot.readString(); + struct.success.add(_elem1208); } } struct.setSuccessIsSet(true); @@ -102474,13 +103771,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1186 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1186.size); - String _elem1187; - for (int _i1188 = 0; _i1188 < _list1186.size; ++_i1188) + org.apache.thrift.protocol.TList _list1210 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1210.size); + String _elem1211; + for (int _i1212 = 0; _i1212 < _list1210.size; ++_i1212) { - _elem1187 = iprot.readString(); - struct.part_vals.add(_elem1187); + _elem1211 = iprot.readString(); + struct.part_vals.add(_elem1211); } iprot.readListEnd(); } @@ -102524,9 +103821,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1189 : struct.part_vals) + for (String _iter1213 : struct.part_vals) { - oprot.writeString(_iter1189); + oprot.writeString(_iter1213); } oprot.writeListEnd(); } @@ -102575,9 +103872,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1190 : struct.part_vals) + for (String _iter1214 : struct.part_vals) { - oprot.writeString(_iter1190); + oprot.writeString(_iter1214); } } } @@ -102600,13 +103897,13 @@ public 
void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1191.size); - String _elem1192; - for (int _i1193 = 0; _i1193 < _list1191.size; ++_i1193) + org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1215.size); + String _elem1216; + for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) { - _elem1192 = iprot.readString(); - struct.part_vals.add(_elem1192); + _elem1216 = iprot.readString(); + struct.part_vals.add(_elem1216); } } struct.setPart_valsIsSet(true); @@ -103097,14 +104394,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1194 = iprot.readListBegin(); - struct.success = new ArrayList(_list1194.size); - Partition _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); + struct.success = new ArrayList(_list1218.size); + Partition _elem1219; + for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) { - _elem1195 = new Partition(); - _elem1195.read(iprot); - struct.success.add(_elem1195); + _elem1219 = new Partition(); + _elem1219.read(iprot); + struct.success.add(_elem1219); } iprot.readListEnd(); } @@ -103148,9 +104445,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1197 : struct.success) + for (Partition _iter1221 : struct.success) { - _iter1197.write(oprot); + _iter1221.write(oprot); } oprot.writeListEnd(); } @@ -103197,9 +104494,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1198 : struct.success) + for (Partition _iter1222 : struct.success) { - _iter1198.write(oprot); + _iter1222.write(oprot); } } } @@ -103217,14 +104514,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1199.size); - Partition _elem1200; - for (int _i1201 = 0; _i1201 < _list1199.size; ++_i1201) + org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1223.size); + Partition _elem1224; + for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) { - _elem1200 = new Partition(); - _elem1200.read(iprot); - struct.success.add(_elem1200); + _elem1224 = new Partition(); + _elem1224.read(iprot); + struct.success.add(_elem1224); } } struct.setSuccessIsSet(true); @@ -103996,13 +105293,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1202 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1202.size); - String _elem1203; - for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) + org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1226.size); + String _elem1227; + for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) { - _elem1203 = iprot.readString(); - struct.part_vals.add(_elem1203); + _elem1227 = iprot.readString(); + struct.part_vals.add(_elem1227); } iprot.readListEnd(); } @@ -104030,13 +105327,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1205 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1205.size); - String _elem1206; - for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) + org.apache.thrift.protocol.TList _list1229 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1229.size); + String _elem1230; + for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) { - _elem1206 = iprot.readString(); - struct.group_names.add(_elem1206); + _elem1230 = iprot.readString(); + struct.group_names.add(_elem1230); } iprot.readListEnd(); } @@ -104072,9 +105369,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1208 : struct.part_vals) + for (String _iter1232 : struct.part_vals) { - oprot.writeString(_iter1208); + oprot.writeString(_iter1232); } oprot.writeListEnd(); } @@ -104092,9 +105389,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1209 : struct.group_names) + for (String _iter1233 : struct.group_names) { - oprot.writeString(_iter1209); + oprot.writeString(_iter1233); } oprot.writeListEnd(); } @@ -104146,9 +105443,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1210 : struct.part_vals) + for (String _iter1234 : struct.part_vals) { - oprot.writeString(_iter1210); + oprot.writeString(_iter1234); } } } @@ -104161,9 +105458,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1211 : struct.group_names) + for (String _iter1235 : struct.group_names) { - oprot.writeString(_iter1211); + oprot.writeString(_iter1235); } } } @@ -104183,13 +105480,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1212 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1212.size); - String _elem1213; - for (int _i1214 = 0; _i1214 < _list1212.size; ++_i1214) + org.apache.thrift.protocol.TList _list1236 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new 
ArrayList(_list1236.size); + String _elem1237; + for (int _i1238 = 0; _i1238 < _list1236.size; ++_i1238) { - _elem1213 = iprot.readString(); - struct.part_vals.add(_elem1213); + _elem1237 = iprot.readString(); + struct.part_vals.add(_elem1237); } } struct.setPart_valsIsSet(true); @@ -104204,13 +105501,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1215 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1215.size); - String _elem1216; - for (int _i1217 = 0; _i1217 < _list1215.size; ++_i1217) + org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1239.size); + String _elem1240; + for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) { - _elem1216 = iprot.readString(); - struct.group_names.add(_elem1216); + _elem1240 = iprot.readString(); + struct.group_names.add(_elem1240); } } struct.setGroup_namesIsSet(true); @@ -104697,14 +105994,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1218 = iprot.readListBegin(); - struct.success = new ArrayList(_list1218.size); - Partition _elem1219; - for (int _i1220 = 0; _i1220 < _list1218.size; ++_i1220) + org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); + struct.success = new ArrayList(_list1242.size); + Partition _elem1243; + for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) { - _elem1219 = new Partition(); - _elem1219.read(iprot); - struct.success.add(_elem1219); + _elem1243 = new Partition(); + _elem1243.read(iprot); + struct.success.add(_elem1243); } iprot.readListEnd(); } @@ -104748,9 +106045,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1221 : struct.success) + for (Partition _iter1245 : struct.success) { - _iter1221.write(oprot); + _iter1245.write(oprot); } oprot.writeListEnd(); } @@ -104797,9 +106094,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1222 : struct.success) + for (Partition _iter1246 : struct.success) { - _iter1222.write(oprot); + _iter1246.write(oprot); } } } @@ -104817,14 +106114,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1223 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1223.size); - Partition _elem1224; - for (int _i1225 = 0; _i1225 < _list1223.size; ++_i1225) + org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1247.size); + Partition _elem1248; + for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) { - _elem1224 = new Partition(); - _elem1224.read(iprot); - struct.success.add(_elem1224); + _elem1248 = new 
Partition(); + _elem1248.read(iprot); + struct.success.add(_elem1248); } } struct.setSuccessIsSet(true); @@ -105417,13 +106714,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1226 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1226.size); - String _elem1227; - for (int _i1228 = 0; _i1228 < _list1226.size; ++_i1228) + org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1250.size); + String _elem1251; + for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) { - _elem1227 = iprot.readString(); - struct.part_vals.add(_elem1227); + _elem1251 = iprot.readString(); + struct.part_vals.add(_elem1251); } iprot.readListEnd(); } @@ -105467,9 +106764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1229 : struct.part_vals) + for (String _iter1253 : struct.part_vals) { - oprot.writeString(_iter1229); + oprot.writeString(_iter1253); } oprot.writeListEnd(); } @@ -105518,9 +106815,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1230 : struct.part_vals) + for (String _iter1254 : struct.part_vals) { - oprot.writeString(_iter1230); + oprot.writeString(_iter1254); } } } @@ -105543,13 +106840,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1231 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1231.size); - String _elem1232; - for (int _i1233 = 0; _i1233 < _list1231.size; ++_i1233) + org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1255.size); + String _elem1256; + for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) { - _elem1232 = iprot.readString(); - struct.part_vals.add(_elem1232); + _elem1256 = iprot.readString(); + struct.part_vals.add(_elem1256); } } struct.setPart_valsIsSet(true); @@ -106037,13 +107334,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1234 = iprot.readListBegin(); - struct.success = new ArrayList(_list1234.size); - String _elem1235; - for (int _i1236 = 0; _i1236 < _list1234.size; ++_i1236) + org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); + struct.success = new ArrayList(_list1258.size); + String _elem1259; + for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) { - _elem1235 = iprot.readString(); - struct.success.add(_elem1235); + _elem1259 = iprot.readString(); + struct.success.add(_elem1259); } iprot.readListEnd(); } @@ -106087,9 +107384,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); 
- for (String _iter1237 : struct.success) + for (String _iter1261 : struct.success) { - oprot.writeString(_iter1237); + oprot.writeString(_iter1261); } oprot.writeListEnd(); } @@ -106136,9 +107433,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1238 : struct.success) + for (String _iter1262 : struct.success) { - oprot.writeString(_iter1238); + oprot.writeString(_iter1262); } } } @@ -106156,13 +107453,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1239 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1239.size); - String _elem1240; - for (int _i1241 = 0; _i1241 < _list1239.size; ++_i1241) + org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1263.size); + String _elem1264; + for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) { - _elem1240 = iprot.readString(); - struct.success.add(_elem1240); + _elem1264 = iprot.readString(); + struct.success.add(_elem1264); } } struct.setSuccessIsSet(true); @@ -107329,14 +108626,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1242 = iprot.readListBegin(); - struct.success = new ArrayList(_list1242.size); - Partition _elem1243; - for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) + org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); + struct.success = new ArrayList(_list1266.size); + Partition _elem1267; + for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) { - _elem1243 = new Partition(); - _elem1243.read(iprot); - struct.success.add(_elem1243); + _elem1267 = new Partition(); + _elem1267.read(iprot); + struct.success.add(_elem1267); } iprot.readListEnd(); } @@ -107380,9 +108677,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1245 : struct.success) + for (Partition _iter1269 : struct.success) { - _iter1245.write(oprot); + _iter1269.write(oprot); } oprot.writeListEnd(); } @@ -107429,9 +108726,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1246 : struct.success) + for (Partition _iter1270 : struct.success) { - _iter1246.write(oprot); + _iter1270.write(oprot); } } } @@ -107449,14 +108746,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1247 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1247.size); - Partition _elem1248; - for (int _i1249 = 0; _i1249 < _list1247.size; ++_i1249) + org.apache.thrift.protocol.TList _list1271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.success = new ArrayList(_list1271.size); + Partition _elem1272; + for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) { - _elem1248 = new Partition(); - _elem1248.read(iprot); - struct.success.add(_elem1248); + _elem1272 = new Partition(); + _elem1272.read(iprot); + struct.success.add(_elem1272); } } struct.setSuccessIsSet(true); @@ -108623,14 +109920,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1250 = iprot.readListBegin(); - struct.success = new ArrayList(_list1250.size); - PartitionSpec _elem1251; - for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) + org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); + struct.success = new ArrayList(_list1274.size); + PartitionSpec _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) { - _elem1251 = new PartitionSpec(); - _elem1251.read(iprot); - struct.success.add(_elem1251); + _elem1275 = new PartitionSpec(); + _elem1275.read(iprot); + struct.success.add(_elem1275); } iprot.readListEnd(); } @@ -108674,9 +109971,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1253 : struct.success) + for (PartitionSpec _iter1277 : struct.success) { - _iter1253.write(oprot); + _iter1277.write(oprot); } oprot.writeListEnd(); } @@ -108723,9 +110020,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1254 : struct.success) + for (PartitionSpec _iter1278 : struct.success) { - _iter1254.write(oprot); + _iter1278.write(oprot); } } } @@ -108743,14 +110040,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1255 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1255.size); - PartitionSpec _elem1256; - for (int _i1257 = 0; _i1257 < _list1255.size; ++_i1257) + org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1279.size); + PartitionSpec _elem1280; + for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) { - _elem1256 = new PartitionSpec(); - _elem1256.read(iprot); - struct.success.add(_elem1256); + _elem1280 = new PartitionSpec(); + _elem1280.read(iprot); + struct.success.add(_elem1280); } } struct.setSuccessIsSet(true); @@ -111334,13 +112631,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1258 = iprot.readListBegin(); - struct.names = new ArrayList(_list1258.size); - String _elem1259; - for (int _i1260 = 0; _i1260 < _list1258.size; ++_i1260) + org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); + struct.names = new ArrayList(_list1282.size); + String _elem1283; + for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) { - _elem1259 = iprot.readString(); - 
struct.names.add(_elem1259); + _elem1283 = iprot.readString(); + struct.names.add(_elem1283); } iprot.readListEnd(); } @@ -111376,9 +112673,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1261 : struct.names) + for (String _iter1285 : struct.names) { - oprot.writeString(_iter1261); + oprot.writeString(_iter1285); } oprot.writeListEnd(); } @@ -111421,9 +112718,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1262 : struct.names) + for (String _iter1286 : struct.names) { - oprot.writeString(_iter1262); + oprot.writeString(_iter1286); } } } @@ -111443,13 +112740,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1263.size); - String _elem1264; - for (int _i1265 = 0; _i1265 < _list1263.size; ++_i1265) + org.apache.thrift.protocol.TList _list1287 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1287.size); + String _elem1288; + for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) { - _elem1264 = iprot.readString(); - struct.names.add(_elem1264); + _elem1288 = iprot.readString(); + struct.names.add(_elem1288); } } struct.setNamesIsSet(true); @@ -111936,14 +113233,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1266 = iprot.readListBegin(); - struct.success = new ArrayList(_list1266.size); - Partition _elem1267; - for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) + org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); + struct.success = new ArrayList(_list1290.size); + Partition _elem1291; + for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) { - _elem1267 = new Partition(); - _elem1267.read(iprot); - struct.success.add(_elem1267); + _elem1291 = new Partition(); + _elem1291.read(iprot); + struct.success.add(_elem1291); } iprot.readListEnd(); } @@ -111987,9 +113284,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1269 : struct.success) + for (Partition _iter1293 : struct.success) { - _iter1269.write(oprot); + _iter1293.write(oprot); } oprot.writeListEnd(); } @@ -112036,9 +113333,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1270 : struct.success) + for (Partition _iter1294 : struct.success) { - _iter1270.write(oprot); + _iter1294.write(oprot); } } } @@ -112056,14 +113353,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1271 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1271.size); - Partition _elem1272; - for (int _i1273 = 0; _i1273 < _list1271.size; ++_i1273) + org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1295.size); + Partition _elem1296; + for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) { - _elem1272 = new Partition(); - _elem1272.read(iprot); - struct.success.add(_elem1272); + _elem1296 = new Partition(); + _elem1296.read(iprot); + struct.success.add(_elem1296); } } struct.setSuccessIsSet(true); @@ -113613,14 +114910,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1274 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1274.size); - Partition _elem1275; - for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) + org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1298.size); + Partition _elem1299; + for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) { - _elem1275 = new Partition(); - _elem1275.read(iprot); - struct.new_parts.add(_elem1275); + _elem1299 = new Partition(); + _elem1299.read(iprot); + struct.new_parts.add(_elem1299); } iprot.readListEnd(); } @@ -113656,9 +114953,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1277 : struct.new_parts) + for (Partition _iter1301 : struct.new_parts) { - _iter1277.write(oprot); + _iter1301.write(oprot); } oprot.writeListEnd(); } @@ -113701,9 +114998,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1278 : struct.new_parts) + for (Partition _iter1302 : struct.new_parts) { - _iter1278.write(oprot); + _iter1302.write(oprot); } } } @@ -113723,14 +115020,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1279 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1279.size); - Partition _elem1280; - for (int _i1281 = 0; _i1281 < _list1279.size; ++_i1281) + org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1303.size); + Partition _elem1304; + for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) { - _elem1280 = new Partition(); - _elem1280.read(iprot); - struct.new_parts.add(_elem1280); + _elem1304 = new Partition(); + _elem1304.read(iprot); + struct.new_parts.add(_elem1304); } } struct.setNew_partsIsSet(true); @@ -114783,14 +116080,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1282 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1282.size); - Partition 
_elem1283; - for (int _i1284 = 0; _i1284 < _list1282.size; ++_i1284) + org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1306.size); + Partition _elem1307; + for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) { - _elem1283 = new Partition(); - _elem1283.read(iprot); - struct.new_parts.add(_elem1283); + _elem1307 = new Partition(); + _elem1307.read(iprot); + struct.new_parts.add(_elem1307); } iprot.readListEnd(); } @@ -114835,9 +116132,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1285 : struct.new_parts) + for (Partition _iter1309 : struct.new_parts) { - _iter1285.write(oprot); + _iter1309.write(oprot); } oprot.writeListEnd(); } @@ -114888,9 +116185,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1286 : struct.new_parts) + for (Partition _iter1310 : struct.new_parts) { - _iter1286.write(oprot); + _iter1310.write(oprot); } } } @@ -114913,14 +116210,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1287 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1287.size); - Partition _elem1288; - for (int _i1289 = 0; _i1289 < _list1287.size; ++_i1289) + org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1311.size); + Partition _elem1312; + for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) { - _elem1288 = new Partition(); - _elem1288.read(iprot); - struct.new_parts.add(_elem1288); + _elem1312 = new Partition(); + _elem1312.read(iprot); + struct.new_parts.add(_elem1312); } } struct.setNew_partsIsSet(true); @@ -117121,13 +118418,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1290 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1290.size); - String _elem1291; - for (int _i1292 = 0; _i1292 < _list1290.size; ++_i1292) + org.apache.thrift.protocol.TList _list1314 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1314.size); + String _elem1315; + for (int _i1316 = 0; _i1316 < _list1314.size; ++_i1316) { - _elem1291 = iprot.readString(); - struct.part_vals.add(_elem1291); + _elem1315 = iprot.readString(); + struct.part_vals.add(_elem1315); } iprot.readListEnd(); } @@ -117172,9 +118469,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1293 : struct.part_vals) + for (String _iter1317 : struct.part_vals) { - oprot.writeString(_iter1293); + oprot.writeString(_iter1317); } oprot.writeListEnd(); } @@ -117225,9 +118522,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { 
oprot.writeI32(struct.part_vals.size()); - for (String _iter1294 : struct.part_vals) + for (String _iter1318 : struct.part_vals) { - oprot.writeString(_iter1294); + oprot.writeString(_iter1318); } } } @@ -117250,13 +118547,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1295.size); - String _elem1296; - for (int _i1297 = 0; _i1297 < _list1295.size; ++_i1297) + org.apache.thrift.protocol.TList _list1319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1319.size); + String _elem1320; + for (int _i1321 = 0; _i1321 < _list1319.size; ++_i1321) { - _elem1296 = iprot.readString(); - struct.part_vals.add(_elem1296); + _elem1320 = iprot.readString(); + struct.part_vals.add(_elem1320); } } struct.setPart_valsIsSet(true); @@ -118130,13 +119427,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1298 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1298.size); - String _elem1299; - for (int _i1300 = 0; _i1300 < _list1298.size; ++_i1300) + org.apache.thrift.protocol.TList _list1322 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1322.size); + String _elem1323; + for (int _i1324 = 0; _i1324 < _list1322.size; ++_i1324) { - _elem1299 = iprot.readString(); - struct.part_vals.add(_elem1299); + _elem1323 = iprot.readString(); + struct.part_vals.add(_elem1323); } iprot.readListEnd(); } @@ -118170,9 +119467,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1301 : struct.part_vals) + for (String _iter1325 : struct.part_vals) { - oprot.writeString(_iter1301); + oprot.writeString(_iter1325); } oprot.writeListEnd(); } @@ -118209,9 +119506,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1302 : struct.part_vals) + for (String _iter1326 : struct.part_vals) { - oprot.writeString(_iter1302); + oprot.writeString(_iter1326); } } } @@ -118226,13 +119523,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1303.size); - String _elem1304; - for (int _i1305 = 0; _i1305 < _list1303.size; ++_i1305) + org.apache.thrift.protocol.TList _list1327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1327.size); + String _elem1328; + for (int _i1329 = 0; _i1329 < _list1327.size; ++_i1329) { - _elem1304 = iprot.readString(); - struct.part_vals.add(_elem1304); + _elem1328 = iprot.readString(); + struct.part_vals.add(_elem1328); } } struct.setPart_valsIsSet(true); @@ -120387,13 +121684,13 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1306 = iprot.readListBegin(); - struct.success = new ArrayList(_list1306.size); - String _elem1307; - for (int _i1308 = 0; _i1308 < _list1306.size; ++_i1308) + org.apache.thrift.protocol.TList _list1330 = iprot.readListBegin(); + struct.success = new ArrayList(_list1330.size); + String _elem1331; + for (int _i1332 = 0; _i1332 < _list1330.size; ++_i1332) { - _elem1307 = iprot.readString(); - struct.success.add(_elem1307); + _elem1331 = iprot.readString(); + struct.success.add(_elem1331); } iprot.readListEnd(); } @@ -120428,9 +121725,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1309 : struct.success) + for (String _iter1333 : struct.success) { - oprot.writeString(_iter1309); + oprot.writeString(_iter1333); } oprot.writeListEnd(); } @@ -120469,9 +121766,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1310 : struct.success) + for (String _iter1334 : struct.success) { - oprot.writeString(_iter1310); + oprot.writeString(_iter1334); } } } @@ -120486,13 +121783,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1311.size); - String _elem1312; - for (int _i1313 = 0; _i1313 < _list1311.size; ++_i1313) + org.apache.thrift.protocol.TList _list1335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1335.size); + String _elem1336; + for (int _i1337 = 0; _i1337 < _list1335.size; ++_i1337) { - _elem1312 = iprot.readString(); - struct.success.add(_elem1312); + _elem1336 = iprot.readString(); + struct.success.add(_elem1336); } } struct.setSuccessIsSet(true); @@ -121255,15 +122552,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1314 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1314.size); - String _key1315; - String _val1316; - for (int _i1317 = 0; _i1317 < _map1314.size; ++_i1317) + org.apache.thrift.protocol.TMap _map1338 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1338.size); + String _key1339; + String _val1340; + for (int _i1341 = 0; _i1341 < _map1338.size; ++_i1341) { - _key1315 = iprot.readString(); - _val1316 = iprot.readString(); - struct.success.put(_key1315, _val1316); + _key1339 = iprot.readString(); + _val1340 = iprot.readString(); + struct.success.put(_key1339, _val1340); } iprot.readMapEnd(); } @@ -121298,10 +122595,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, 
struct.success.size())); - for (Map.Entry _iter1318 : struct.success.entrySet()) + for (Map.Entry _iter1342 : struct.success.entrySet()) { - oprot.writeString(_iter1318.getKey()); - oprot.writeString(_iter1318.getValue()); + oprot.writeString(_iter1342.getKey()); + oprot.writeString(_iter1342.getValue()); } oprot.writeMapEnd(); } @@ -121340,10 +122637,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1319 : struct.success.entrySet()) + for (Map.Entry _iter1343 : struct.success.entrySet()) { - oprot.writeString(_iter1319.getKey()); - oprot.writeString(_iter1319.getValue()); + oprot.writeString(_iter1343.getKey()); + oprot.writeString(_iter1343.getValue()); } } } @@ -121358,15 +122655,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1320 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1320.size); - String _key1321; - String _val1322; - for (int _i1323 = 0; _i1323 < _map1320.size; ++_i1323) + org.apache.thrift.protocol.TMap _map1344 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1344.size); + String _key1345; + String _val1346; + for (int _i1347 = 0; _i1347 < _map1344.size; ++_i1347) { - _key1321 = iprot.readString(); - _val1322 = iprot.readString(); - struct.success.put(_key1321, _val1322); + _key1345 = iprot.readString(); + _val1346 = iprot.readString(); + struct.success.put(_key1345, _val1346); } } struct.setSuccessIsSet(true); @@ -121961,15 +123258,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1324 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1324.size); - String _key1325; - String _val1326; - for (int _i1327 = 0; _i1327 < _map1324.size; ++_i1327) + org.apache.thrift.protocol.TMap _map1348 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1348.size); + String _key1349; + String _val1350; + for (int _i1351 = 0; _i1351 < _map1348.size; ++_i1351) { - _key1325 = iprot.readString(); - _val1326 = iprot.readString(); - struct.part_vals.put(_key1325, _val1326); + _key1349 = iprot.readString(); + _val1350 = iprot.readString(); + struct.part_vals.put(_key1349, _val1350); } iprot.readMapEnd(); } @@ -122013,10 +123310,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1328 : struct.part_vals.entrySet()) + for (Map.Entry _iter1352 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1328.getKey()); - oprot.writeString(_iter1328.getValue()); + oprot.writeString(_iter1352.getKey()); + oprot.writeString(_iter1352.getValue()); } oprot.writeMapEnd(); } @@ -122067,10 +123364,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { 
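The map hunks follow the same template with paired key/value reads. One detail worth noting in the generated code above: the HashMap is pre-sized to 2 * map.size, which keeps the table under its default 0.75 load factor so it never rehashes while being filled. A sketch of that pattern (names illustrative):

    // MapReadSketch.java -- illustrative; mirrors the generated read path
    // for a Thrift map<string,string> field such as part_vals.
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TMap;
    import org.apache.thrift.protocol.TProtocol;

    class MapReadSketch {
      static Map<String, String> readStringMap(TProtocol iprot) throws TException {
        TMap header = iprot.readMapBegin();
        // Sized at 2 * entry count, as in the generated code, so the
        // default 0.75 load factor is never exceeded during the fill.
        Map<String, String> m = new HashMap<String, String>(2 * header.size);
        for (int i = 0; i < header.size; ++i) {
          String key = iprot.readString();
          String val = iprot.readString();
          m.put(key, val);
        }
        iprot.readMapEnd();
        return m;
      }
    }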
oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1329 : struct.part_vals.entrySet()) + for (Map.Entry _iter1353 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1329.getKey()); - oprot.writeString(_iter1329.getValue()); + oprot.writeString(_iter1353.getKey()); + oprot.writeString(_iter1353.getValue()); } } } @@ -122093,15 +123390,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1330 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1330.size); - String _key1331; - String _val1332; - for (int _i1333 = 0; _i1333 < _map1330.size; ++_i1333) + org.apache.thrift.protocol.TMap _map1354 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1354.size); + String _key1355; + String _val1356; + for (int _i1357 = 0; _i1357 < _map1354.size; ++_i1357) { - _key1331 = iprot.readString(); - _val1332 = iprot.readString(); - struct.part_vals.put(_key1331, _val1332); + _key1355 = iprot.readString(); + _val1356 = iprot.readString(); + struct.part_vals.put(_key1355, _val1356); } } struct.setPart_valsIsSet(true); @@ -123585,15 +124882,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1334 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1334.size); - String _key1335; - String _val1336; - for (int _i1337 = 0; _i1337 < _map1334.size; ++_i1337) + org.apache.thrift.protocol.TMap _map1358 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1358.size); + String _key1359; + String _val1360; + for (int _i1361 = 0; _i1361 < _map1358.size; ++_i1361) { - _key1335 = iprot.readString(); - _val1336 = iprot.readString(); - struct.part_vals.put(_key1335, _val1336); + _key1359 = iprot.readString(); + _val1360 = iprot.readString(); + struct.part_vals.put(_key1359, _val1360); } iprot.readMapEnd(); } @@ -123637,10 +124934,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1338 : struct.part_vals.entrySet()) + for (Map.Entry _iter1362 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1338.getKey()); - oprot.writeString(_iter1338.getValue()); + oprot.writeString(_iter1362.getKey()); + oprot.writeString(_iter1362.getValue()); } oprot.writeMapEnd(); } @@ -123691,10 +124988,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1339 : struct.part_vals.entrySet()) + for (Map.Entry _iter1363 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1339.getKey()); - oprot.writeString(_iter1339.getValue()); + oprot.writeString(_iter1363.getKey()); + oprot.writeString(_iter1363.getValue()); } } } @@ -123717,15 +125014,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1340 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1340.size); - String _key1341; - String _val1342; - for (int _i1343 = 0; _i1343 < _map1340.size; ++_i1343) + org.apache.thrift.protocol.TMap _map1364 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1364.size); + String _key1365; + String _val1366; + for (int _i1367 = 0; _i1367 < _map1364.size; ++_i1367) { - _key1341 = iprot.readString(); - _val1342 = iprot.readString(); - struct.part_vals.put(_key1341, _val1342); + _key1365 = iprot.readString(); + _val1366 = iprot.readString(); + struct.part_vals.put(_key1365, _val1366); } } struct.setPart_valsIsSet(true); @@ -126646,17 +127943,1163 @@ public String getFieldName() { tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_index_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(alter_index_result.class, metaDataMap); + } + + public alter_index_result() { + } + + public alter_index_result( + InvalidOperationException o1, + MetaException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public alter_index_result(alter_index_result other) { + if (other.isSetO1()) { + this.o1 = new InvalidOperationException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public alter_index_result deepCopy() { + return new alter_index_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public InvalidOperationException getO1() { + return this.o1; + } + + public void setO1(InvalidOperationException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((InvalidOperationException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch 
(field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof alter_index_result) + return this.equals((alter_index_result)that); + return false; + } + + public boolean equals(alter_index_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(alter_index_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("alter_index_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new 
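The equals/hashCode bodies above all follow one convention: a field participates through its presence flag plus its value, so two structs compare equal on a field when it is unset on both sides or set to equal values on both. A minimal sketch of that presence-aware comparison (class and method names illustrative):

    // EqualsSketch.java -- illustrative; the generated equals() treats a
    // field as "present" only when isSet*() is true.
    class EqualsSketch {
      static boolean fieldEquals(Object mine, Object theirs) {
        boolean thisPresent = mine != null;    // generated code: this.isSetO1()
        boolean thatPresent = theirs != null;  // generated code: that.isSetO1()
        if (thisPresent || thatPresent) {
          if (!(thisPresent && thatPresent)) {
            return false;                      // set on exactly one side
          }
          return mine.equals(theirs);          // set on both: compare values
        }
        return true;                           // unset on both sides
      }
    }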
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class alter_index_resultStandardSchemeFactory implements SchemeFactory { + public alter_index_resultStandardScheme getScheme() { + return new alter_index_resultStandardScheme(); + } + } + + private static class alter_index_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_index_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_index_resultTupleSchemeFactory implements SchemeFactory { + public alter_index_resultTupleScheme getScheme() { + return new alter_index_resultTupleScheme(); + } + } + + private static class alter_index_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
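The StandardScheme readers above are also deliberately tolerant: they switch on the wire field id and TProtocolUtil.skip anything unrecognized, which is what lets an older client decode structs emitted from a newer IDL. Stripped to its skeleton (names illustrative):

    // SkipSketch.java -- illustrative; the generated readers skip unknown
    // fields instead of failing, giving forward compatibility.
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TField;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TProtocolUtil;
    import org.apache.thrift.protocol.TType;

    class SkipSketch {
      static void readUnknownTolerant(TProtocol iprot) throws TException {
        iprot.readStructBegin();
        while (true) {
          TField f = iprot.readFieldBegin();
          if (f.type == TType.STOP) {
            break;                              // end of struct on the wire
          }
          switch (f.id) {
            // case 1: ... known fields would be decoded here ...
            default:
              TProtocolUtil.skip(iprot, f.type); // tolerate unknown fields
          }
          iprot.readFieldEnd();
        }
        iprot.readStructEnd();
      }
    }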
Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_args"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new drop_index_by_name_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_index_by_name_argsTupleSchemeFactory()); + } + + private String db_name; // required + private String tbl_name; // required + private String index_name; // required + private boolean deleteData; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + INDEX_NAME((short)3, "index_name"), + DELETE_DATA((short)4, "deleteData"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // INDEX_NAME + return INDEX_NAME; + case 4: // DELETE_DATA + return DELETE_DATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __DELETEDATA_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_args.class, metaDataMap); + } + + public drop_index_by_name_args() { + } + + public drop_index_by_name_args( + String db_name, + String tbl_name, + String index_name, + boolean deleteData) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.index_name = index_name; + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public drop_index_by_name_args(drop_index_by_name_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetIndex_name()) { + this.index_name = other.index_name; + } + this.deleteData = other.deleteData; + } + + public drop_index_by_name_args deepCopy() { + return new drop_index_by_name_args(this); + } + + @Override + public void clear() { + this.db_name = null; + this.tbl_name = null; + this.index_name = null; + setDeleteDataIsSet(false); + this.deleteData = false; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public String getIndex_name() { + return this.index_name; + } + + public void setIndex_name(String index_name) { + this.index_name = index_name; + } + + public void unsetIndex_name() { + this.index_name = null; + } + + /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ + public boolean isSetIndex_name() { + return this.index_name != null; + } + + public void setIndex_nameIsSet(boolean value) { + if (!value) { + this.index_name = null; + } + } + + public boolean isDeleteData() { + return this.deleteData; + } + + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + public void unsetDeleteData() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID); + } + + /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ + public boolean isSetDeleteData() { + return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID); + } + + public void setDeleteDataIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case INDEX_NAME: + if (value == null) { + unsetIndex_name(); + } else { + setIndex_name((String)value); + } + break; + + case DELETE_DATA: + if (value == null) { + unsetDeleteData(); + } else { + setDeleteData((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case INDEX_NAME: + return getIndex_name(); + + case DELETE_DATA: + return isDeleteData(); + + } + throw new 
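deleteData above shows the generator's treatment of primitive fields: a boolean cannot be null, so assignment state is tracked in a one-byte bitfield via org.apache.thrift.EncodingUtils, one bit per primitive field. A self-contained sketch of that pattern (field names mirror the diff, but the class is illustrative):

    // IssetSketch.java -- illustrative; primitives need an explicit
    // "has been assigned" bit because they have no null state.
    import org.apache.thrift.EncodingUtils;

    class IssetSketch {
      private static final int DELETEDATA_ISSET_ID = 0; // bit position, as in the diff
      private byte issetBitfield = 0;
      private boolean deleteData;

      void setDeleteData(boolean v) {
        deleteData = v;
        issetBitfield = EncodingUtils.setBit(issetBitfield, DELETEDATA_ISSET_ID, true);
      }

      boolean isSetDeleteData() {
        return EncodingUtils.testBit(issetBitfield, DELETEDATA_ISSET_ID);
      }

      void unsetDeleteData() {
        issetBitfield = EncodingUtils.clearBit(issetBitfield, DELETEDATA_ISSET_ID);
      }
    }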
IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case INDEX_NAME: + return isSetIndex_name(); + case DELETE_DATA: + return isSetDeleteData(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_index_by_name_args) + return this.equals((drop_index_by_name_args)that); + return false; + } + + public boolean equals(drop_index_by_name_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_index_name = true && this.isSetIndex_name(); + boolean that_present_index_name = true && that.isSetIndex_name(); + if (this_present_index_name || that_present_index_name) { + if (!(this_present_index_name && that_present_index_name)) + return false; + if (!this.index_name.equals(that.index_name)) + return false; + } + + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) + return false; + if (this.deleteData != that.deleteData) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_index_name = true && (isSetIndex_name()); + list.add(present_index_name); + if (present_index_name) + list.add(index_name); + + boolean present_deleteData = true; + list.add(present_deleteData); + if (present_deleteData) + list.add(deleteData); + + return list.hashCode(); + } + + @Override + public int compareTo(drop_index_by_name_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return 
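compareTo above orders structs field by field, presence before value: an unset field sorts ahead of a set one, and values are compared only when both sides are set, delegating to org.apache.thrift.TBaseHelper. Reduced to a single field (names illustrative):

    // CompareSketch.java -- illustrative; mirrors the generated per-field
    // ordering so sorting stays total even with unset fields.
    import org.apache.thrift.TBaseHelper;

    class CompareSketch {
      static int compareField(boolean mineSet, boolean theirsSet,
                              String mine, String theirs) {
        int c = Boolean.valueOf(mineSet).compareTo(theirsSet);
        if (c != 0) {
          return c;                 // presence differs: unset sorts first
        }
        // Same presence: compare values only when both are set.
        return mineSet ? TBaseHelper.compareTo(mine, theirs) : 0;
      }
    }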
lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIndex_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDeleteData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_index_by_name_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("index_name:"); + if (this.index_name == null) { + sb.append("null"); + } else { + sb.append(this.index_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class drop_index_by_name_argsStandardSchemeFactory implements SchemeFactory { + public drop_index_by_name_argsStandardScheme getScheme() { + return new drop_index_by_name_argsStandardScheme(); + } + } + + private static class drop_index_by_name_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // INDEX_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.index_name = iprot.readString(); + struct.setIndex_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DELETE_DATA + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.deleteData = iprot.readBool(); + struct.setDeleteDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.index_name != null) { + oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); + oprot.writeString(struct.index_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(struct.deleteData); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class drop_index_by_name_argsTupleSchemeFactory implements SchemeFactory { + public drop_index_by_name_argsTupleScheme getScheme() { + return new drop_index_by_name_argsTupleScheme(); + } + } + + private static class drop_index_by_name_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDb_name()) { + optionals.set(0); + } + if 
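The writeObject/readObject pair above routes plain Java serialization through TCompactProtocol over a TIOStreamTransport, so serializing these structs is really Thrift compact encoding; readObject zeroes __isset_bitfield first because, as the generator's own comment notes, deserialization bypasses the default constructor. Applications usually reach the same encoding through TSerializer/TDeserializer; a hedged usage sketch, assuming any generated TBase struct:

    // SerializeSketch.java -- illustrative; TSerializer and TDeserializer
    // are real Thrift utilities, and any generated struct can be passed in.
    import org.apache.thrift.TBase;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    class SerializeSketch {
      static byte[] toBytes(TBase<?, ?> struct) throws TException {
        // Same compact encoding the generated writeObject uses.
        return new TSerializer(new TCompactProtocol.Factory()).serialize(struct);
      }

      static void fromBytes(TBase<?, ?> struct, byte[] bytes) throws TException {
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(struct, bytes);
      }
    }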
(struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetIndex_name()) { + optionals.set(2); + } + if (struct.isSetDeleteData()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetIndex_name()) { + oprot.writeString(struct.index_name); + } + if (struct.isSetDeleteData()) { + oprot.writeBool(struct.deleteData); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + struct.index_name = iprot.readString(); + struct.setIndex_nameIsSet(true); + } + if (incoming.get(3)) { + struct.deleteData = iprot.readBool(); + struct.setDeleteDataIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new drop_index_by_name_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_index_by_name_resultTupleSchemeFactory()); + } + + private boolean success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
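The TupleScheme halves of these hunks trade self-description for size: the writer emits one BitSet announcing which optional fields follow, then the bare values with no per-field headers, so the reader must be generated from the exact same IDL to decode them. A minimal sketch of the write side, using two hypothetical string fields:

    // TupleSketch.java -- illustrative; mirrors the generated TupleScheme
    // write path, where unset fields cost one bit instead of a field header.
    import java.util.BitSet;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TTupleProtocol;

    class TupleSketch {
      static void writeArgs(TTupleProtocol oprot, String dbName, String tblName)
          throws TException {
        BitSet optionals = new BitSet();
        if (dbName != null)  optionals.set(0);
        if (tblName != null) optionals.set(1);
        oprot.writeBitSet(optionals, 2);      // 2 = number of tracked fields
        if (dbName != null)  oprot.writeString(dbName);
        if (tblName != null) oprot.writeString(tblName);
      }
    }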
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_result.class, metaDataMap); } - public alter_index_result() { + public drop_index_by_name_result() { } - public alter_index_result( - InvalidOperationException o1, + public drop_index_by_name_result( + boolean success, + NoSuchObjectException o1, MetaException o2) { this(); + this.success = success; + setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; } @@ -126664,30 +129107,56 @@ public alter_index_result( /** * Performs a deep copy on other. 
*/ - public alter_index_result(alter_index_result other) { + public drop_index_by_name_result(drop_index_by_name_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; if (other.isSetO1()) { - this.o1 = new InvalidOperationException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { this.o2 = new MetaException(other.o2); } } - public alter_index_result deepCopy() { - return new alter_index_result(this); + public drop_index_by_name_result deepCopy() { + return new drop_index_by_name_result(this); } @Override public void clear() { + setSuccessIsSet(false); + this.success = false; this.o1 = null; this.o2 = null; } - public InvalidOperationException getO1() { + public boolean isSuccess() { + return this.success; + } + + public void setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + } + + public void unsetSuccess() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(InvalidOperationException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -126731,11 +129200,19 @@ public void setO2IsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Boolean)value); + } + break; + case O1: if (value == null) { unsetO1(); } else { - setO1((InvalidOperationException)value); + setO1((NoSuchObjectException)value); } break; @@ -126752,6 +129229,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return isSuccess(); + case O1: return getO1(); @@ -126769,6 +129249,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); case O1: return isSetO1(); case O2: @@ -126781,15 +129263,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof alter_index_result) - return this.equals((alter_index_result)that); + if (that instanceof drop_index_by_name_result) + return this.equals((drop_index_by_name_result)that); return false; } - public boolean equals(alter_index_result that) { + public boolean equals(drop_index_by_name_result that) { if (that == null) return false; + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) + return false; + } + boolean this_present_o1 = true && this.isSetO1(); boolean that_present_o1 = true && that.isSetO1(); if (this_present_o1 || that_present_o1) { @@ -126815,6 +129306,11 @@ public boolean equals(alter_index_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true; + list.add(present_success); + if (present_success) + list.add(success); + boolean present_o1 = true && (isSetO1()); list.add(present_o1); if (present_o1) @@ -126829,13 +129325,23 @@ public int 
hashCode() { } @Override - public int compareTo(alter_index_result other) { + public int compareTo(drop_index_by_name_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); if (lastComparison != 0) { return lastComparison; @@ -126873,9 +129379,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("alter_index_result("); + StringBuilder sb = new StringBuilder("drop_index_by_name_result("); boolean first = true; + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); sb.append("o1:"); if (this.o1 == null) { sb.append("null"); @@ -126910,21 +129420,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class alter_index_resultStandardSchemeFactory implements SchemeFactory { - public alter_index_resultStandardScheme getScheme() { - return new alter_index_resultStandardScheme(); + private static class drop_index_by_name_resultStandardSchemeFactory implements SchemeFactory { + public drop_index_by_name_resultStandardScheme getScheme() { + return new drop_index_by_name_resultStandardScheme(); } } - private static class alter_index_resultStandardScheme extends StandardScheme { + private static class drop_index_by_name_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -126934,9 +129446,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new InvalidOperationException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -126961,10 +129481,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_index_result struct) throws org.apache.thrift.TException { 
+ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -126981,25 +129506,31 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_index_result } - private static class alter_index_resultTupleSchemeFactory implements SchemeFactory { - public alter_index_resultTupleScheme getScheme() { - return new alter_index_resultTupleScheme(); + private static class drop_index_by_name_resultTupleSchemeFactory implements SchemeFactory { + public drop_index_by_name_resultTupleScheme getScheme() { + return new drop_index_by_name_resultTupleScheme(); } } - private static class alter_index_resultTupleScheme extends TupleScheme { + private static class drop_index_by_name_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } if (struct.isSetO1()) { struct.o1.write(oprot); } @@ -127009,15 +129540,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_index_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.o1 = new InvalidOperationException(); + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); @@ -127027,31 +129562,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_index_result s } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
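A reading note on this run of hunks: they look like alter_index_result being renamed to drop_index_by_name_result, but the added lines earlier in this section re-emit the full alter_index_result body, so both classes plausibly exist before and after; the diff tool has aligned old and new copies out of phase after insertions shifted the generated classes, consistent with the +24 jump in temporary numbering seen above. Mechanically, every one of these classes dispatches through the same scheme table keyed on the protocol class (TTupleProtocol reports TupleScheme, other protocols report StandardScheme); a sketch of that dispatch:

    // SchemeDispatchSketch.java -- illustrative; mirrors the generated
    // read()/write() entry points. The generated static block populates
    // the map with a StandardScheme and a TupleScheme factory.
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.scheme.IScheme;
    import org.apache.thrift.scheme.SchemeFactory;

    class SchemeDispatchSketch {
      static final Map<Class<? extends IScheme>, SchemeFactory> SCHEMES =
          new HashMap<Class<? extends IScheme>, SchemeFactory>();

      static IScheme pick(TProtocol prot) {
        // Generated code: schemes.get(iprot.getScheme()).getScheme()
        return SCHEMES.get(prot.getScheme()).getScheme();
      }
    }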
org.apache.thrift.protocol.TStruct("get_index_by_name_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_index_by_name_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_index_by_name_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_index_by_name_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_by_name_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required private String index_name; // required - private boolean deleteData; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - INDEX_NAME((short)3, "index_name"), - DELETE_DATA((short)4, "deleteData"); + INDEX_NAME((short)3, "index_name"); private static final Map byName = new HashMap(); @@ -127072,8 +129604,6 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // INDEX_NAME return INDEX_NAME; - case 4: // DELETE_DATA - return DELETE_DATA; default: return null; } @@ -127114,8 +129644,6 @@ public String getFieldName() { } // isset id assignments - private static final int __DELETEDATA_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -127125,34 +129653,28 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_args.class, metaDataMap); } - public drop_index_by_name_args() { + public get_index_by_name_args() { } - public drop_index_by_name_args( + public get_index_by_name_args( String db_name, String tbl_name, - String index_name, - boolean deleteData) + String index_name) { this(); 
this.db_name = db_name; this.tbl_name = tbl_name; this.index_name = index_name; - this.deleteData = deleteData; - setDeleteDataIsSet(true); } /** * Performs a deep copy on other. */ - public drop_index_by_name_args(drop_index_by_name_args other) { - __isset_bitfield = other.__isset_bitfield; + public get_index_by_name_args(get_index_by_name_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -127162,11 +129684,10 @@ public drop_index_by_name_args(drop_index_by_name_args other) { if (other.isSetIndex_name()) { this.index_name = other.index_name; } - this.deleteData = other.deleteData; } - public drop_index_by_name_args deepCopy() { - return new drop_index_by_name_args(this); + public get_index_by_name_args deepCopy() { + return new get_index_by_name_args(this); } @Override @@ -127174,8 +129695,6 @@ public void clear() { this.db_name = null; this.tbl_name = null; this.index_name = null; - setDeleteDataIsSet(false); - this.deleteData = false; } public String getDb_name() { @@ -127247,28 +129766,6 @@ public void setIndex_nameIsSet(boolean value) { } } - public boolean isDeleteData() { - return this.deleteData; - } - - public void setDeleteData(boolean deleteData) { - this.deleteData = deleteData; - setDeleteDataIsSet(true); - } - - public void unsetDeleteData() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID); - } - - /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ - public boolean isSetDeleteData() { - return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID); - } - - public void setDeleteDataIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -127295,14 +129792,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case DELETE_DATA: - if (value == null) { - unsetDeleteData(); - } else { - setDeleteData((Boolean)value); - } - break; - } } @@ -127317,9 +129806,6 @@ public Object getFieldValue(_Fields field) { case INDEX_NAME: return getIndex_name(); - case DELETE_DATA: - return isDeleteData(); - } throw new IllegalStateException(); } @@ -127337,8 +129823,6 @@ public boolean isSet(_Fields field) { return isSetTbl_name(); case INDEX_NAME: return isSetIndex_name(); - case DELETE_DATA: - return isSetDeleteData(); } throw new IllegalStateException(); } @@ -127347,12 +129831,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_index_by_name_args) - return this.equals((drop_index_by_name_args)that); + if (that instanceof get_index_by_name_args) + return this.equals((get_index_by_name_args)that); return false; } - public boolean equals(drop_index_by_name_args that) { + public boolean equals(get_index_by_name_args that) { if (that == null) return false; @@ -127383,15 +129867,6 @@ public boolean equals(drop_index_by_name_args that) { return false; } - boolean this_present_deleteData = true; - boolean that_present_deleteData = true; - if (this_present_deleteData || that_present_deleteData) { - if (!(this_present_deleteData && that_present_deleteData)) - return false; - if (this.deleteData != that.deleteData) - return false; - } - return true; } @@ -127414,16 +129889,11 @@ public int hashCode() { if (present_index_name) list.add(index_name); - boolean present_deleteData = true; - list.add(present_deleteData); - if 
(present_deleteData) - list.add(deleteData); - return list.hashCode(); } @Override - public int compareTo(drop_index_by_name_args other) { + public int compareTo(get_index_by_name_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -127460,16 +129930,6 @@ public int compareTo(drop_index_by_name_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDeleteData()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -127487,7 +129947,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_index_by_name_args("); + StringBuilder sb = new StringBuilder("get_index_by_name_args("); boolean first = true; sb.append("db_name:"); @@ -127513,10 +129973,6 @@ public String toString() { sb.append(this.index_name); } first = false; - if (!first) sb.append(", "); - sb.append("deleteData:"); - sb.append(this.deleteData); - first = false; sb.append(")"); return sb.toString(); } @@ -127536,23 +129992,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class drop_index_by_name_argsStandardSchemeFactory implements SchemeFactory { - public drop_index_by_name_argsStandardScheme getScheme() { - return new drop_index_by_name_argsStandardScheme(); + private static class get_index_by_name_argsStandardSchemeFactory implements SchemeFactory { + public get_index_by_name_argsStandardScheme getScheme() { + return new get_index_by_name_argsStandardScheme(); } } - private static class drop_index_by_name_argsStandardScheme extends StandardScheme { + private static class get_index_by_name_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -127586,14 +130040,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // DELETE_DATA - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.deleteData = iprot.readBool(); - struct.setDeleteDataIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -127603,7 +130049,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -127622,25 +130068,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name oprot.writeString(struct.index_name); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); - oprot.writeBool(struct.deleteData); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class drop_index_by_name_argsTupleSchemeFactory implements SchemeFactory { - public drop_index_by_name_argsTupleScheme getScheme() { - return new drop_index_by_name_argsTupleScheme(); + private static class get_index_by_name_argsTupleSchemeFactory implements SchemeFactory { + public get_index_by_name_argsTupleScheme getScheme() { + return new get_index_by_name_argsTupleScheme(); } } - private static class drop_index_by_name_argsTupleScheme extends TupleScheme { + private static class get_index_by_name_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -127652,10 +130095,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ if (struct.isSetIndex_name()) { optionals.set(2); } - if (struct.isSetDeleteData()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } @@ -127665,15 +130105,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ if (struct.isSetIndex_name()) { oprot.writeString(struct.index_name); } - if (struct.isSetDeleteData()) { - oprot.writeBool(struct.deleteData); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -127686,31 +130123,27 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_a struct.index_name = iprot.readString(); struct.setIndex_nameIsSet(true); } - if (incoming.get(3)) { - struct.deleteData = iprot.readBool(); - struct.setDeleteDataIsSet(true); - } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_by_name_result implements 
org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_index_by_name_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_index_by_name_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_index_by_name_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_by_name_resultTupleSchemeFactory()); } - private boolean success; // required - private NoSuchObjectException o1; // required - private MetaException o2; // required + private Index success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -127777,32 +130210,29 @@ public String getFieldName() { } // isset id assignments - private static final int __SUCCESS_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_result.class, metaDataMap); } - public drop_index_by_name_result() { + public get_index_by_name_result() { } - public drop_index_by_name_result( - boolean success, - NoSuchObjectException o1, - MetaException o2) + public get_index_by_name_result( + Index success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; 
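
The hunk above shows why the __SUCCESS_ISSET_ID bitfield disappears when success changes from boolean to Index: object-typed fields can use null as "unset", while primitives need an explicit presence bit because false/0 are legitimate values. A minimal plain-JDK sketch of both strategies (hypothetical class; the bit operations mirror what org.apache.thrift.EncodingUtils does for the generated code):

    // Sketch of the two presence-tracking strategies visible in the hunk above.
    class PresenceDemo {
        private static final int SUCCESS_ISSET_ID = 0;
        private byte issetBitfield = 0;

        private boolean primitiveSuccess; // needs the bitfield: false is a valid value
        private Object structSuccess;     // null doubles as "unset"

        void setPrimitiveSuccess(boolean v) {
            primitiveSuccess = v;
            issetBitfield |= (1 << SUCCESS_ISSET_ID);        // like EncodingUtils.setBit
        }

        boolean isSetPrimitiveSuccess() {
            return (issetBitfield & (1 << SUCCESS_ISSET_ID)) != 0; // like EncodingUtils.testBit
        }

        void unsetPrimitiveSuccess() {
            issetBitfield &= ~(1 << SUCCESS_ISSET_ID);       // like EncodingUtils.clearBit
        }

        boolean isSetStructSuccess() {
            return structSuccess != null;
        }
    }
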
- setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; } @@ -127810,56 +130240,57 @@ public drop_index_by_name_result( /** * Performs a deep copy on other. */ - public drop_index_by_name_result(drop_index_by_name_result other) { - __isset_bitfield = other.__isset_bitfield; - this.success = other.success; + public get_index_by_name_result(get_index_by_name_result other) { + if (other.isSetSuccess()) { + this.success = new Index(other.success); + } if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } - public drop_index_by_name_result deepCopy() { - return new drop_index_by_name_result(this); + public get_index_by_name_result deepCopy() { + return new get_index_by_name_result(this); } @Override public void clear() { - setSuccessIsSet(false); - this.success = false; + this.success = null; this.o1 = null; this.o2 = null; } - public boolean isSuccess() { + public Index getSuccess() { return this.success; } - public void setSuccess(boolean success) { + public void setSuccess(Index success) { this.success = success; - setSuccessIsSet(true); } public void unsetSuccess() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + this.success = null; } /** Returns true if field success is set (has been assigned a value) and false otherwise */ public boolean isSetSuccess() { - return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + return this.success != null; } public void setSuccessIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + if (!value) { + this.success = null; + } } - public NoSuchObjectException getO1() { + public MetaException getO1() { return this.o1; } - public void setO1(NoSuchObjectException o1) { + public void setO1(MetaException o1) { this.o1 = o1; } @@ -127878,11 +130309,11 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -127907,7 +130338,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Boolean)value); + setSuccess((Index)value); } break; @@ -127915,7 +130346,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchObjectException)value); + setO1((MetaException)value); } break; @@ -127923,7 +130354,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -127933,7 +130364,7 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case SUCCESS: - return isSuccess(); + return getSuccess(); case O1: return getO1(); @@ -127966,21 +130397,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_index_by_name_result) - return this.equals((drop_index_by_name_result)that); + if (that instanceof get_index_by_name_result) + return this.equals((get_index_by_name_result)that); return false; } - public boolean equals(drop_index_by_name_result that) { + public boolean equals(get_index_by_name_result that) { if (that == 
null) return false; - boolean this_present_success = true; - boolean that_present_success = true; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (this.success != that.success) + if (!this.success.equals(that.success)) return false; } @@ -128009,7 +130440,7 @@ public boolean equals(drop_index_by_name_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true; + boolean present_success = true && (isSetSuccess()); list.add(present_success); if (present_success) list.add(success); @@ -128028,7 +130459,7 @@ public int hashCode() { } @Override - public int compareTo(drop_index_by_name_result other) { + public int compareTo(get_index_by_name_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -128082,11 +130513,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_index_by_name_result("); + StringBuilder sb = new StringBuilder("get_index_by_name_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -128111,6 +130546,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -128123,23 +130561,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
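
The generated comment above ("java serialization is wacky, and doesn't call the default constructor") is the reason readObject() re-zeroes __isset_bitfield before delegating to the Thrift protocol read: Java deserialization invokes the no-arg constructor of the first non-serializable superclass only, so the class's own constructor and field defaulting are skipped. A runnable plain-JDK demonstration of that behavior (hypothetical class, not the generated one):

    import java.io.*;

    public class NoCtorOnReadDemo implements Serializable {
        private static final long serialVersionUID = 1L;
        transient byte bitfield = 42; // initializer only runs during construction

        public NoCtorOnReadDemo() {
            System.out.println("constructor ran");
        }

        private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
            bitfield = 0; // same idea as the generated "__isset_bitfield = 0;" reset
            in.defaultReadObject();
        }

        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(buf);
            oos.writeObject(new NoCtorOnReadDemo()); // "constructor ran" prints once, here
            oos.flush();
            ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray()));
            NoCtorOnReadDemo copy = (NoCtorOnReadDemo) in.readObject(); // constructor NOT run
            System.out.println("bitfield after read: " + copy.bitfield); // 0, set by readObject
        }
    }
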
- __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class drop_index_by_name_resultStandardSchemeFactory implements SchemeFactory { - public drop_index_by_name_resultStandardScheme getScheme() { - return new drop_index_by_name_resultStandardScheme(); + private static class get_index_by_name_resultStandardSchemeFactory implements SchemeFactory { + public get_index_by_name_resultStandardScheme getScheme() { + return new get_index_by_name_resultStandardScheme(); } } - private static class drop_index_by_name_resultStandardScheme extends StandardScheme { + private static class get_index_by_name_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -128150,8 +130586,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new Index(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -128159,7 +130596,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -128168,7 +130605,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -128184,13 +130621,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { + if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -128209,16 +130646,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name } - private static class drop_index_by_name_resultTupleSchemeFactory implements SchemeFactory { - public drop_index_by_name_resultTupleScheme getScheme() { - return new drop_index_by_name_resultTupleScheme(); + private static class get_index_by_name_resultTupleSchemeFactory implements SchemeFactory { + public get_index_by_name_resultTupleScheme getScheme() { + return new get_index_by_name_resultTupleScheme(); } } - private 
static class drop_index_by_name_resultTupleScheme extends TupleScheme { + private static class get_index_by_name_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -128232,7 +130669,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ } oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { - oprot.writeBool(struct.success); + struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -128243,20 +130680,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = iprot.readBool(); + struct.success = new Index(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -128265,28 +130703,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_r } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_indexes_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField MAX_INDEXES_FIELD_DESC = new org.apache.thrift.protocol.TField("max_indexes", org.apache.thrift.protocol.TType.I16, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_by_name_argsStandardSchemeFactory()); - 
schemes.put(TupleScheme.class, new get_index_by_name_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_indexes_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_indexes_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required - private String index_name; // required + private short max_indexes; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - INDEX_NAME((short)3, "index_name"); + MAX_INDEXES((short)3, "max_indexes"); private static final Map byName = new HashMap(); @@ -128305,8 +130743,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // INDEX_NAME - return INDEX_NAME; + case 3: // MAX_INDEXES + return MAX_INDEXES; default: return null; } @@ -128347,6 +130785,8 @@ public String getFieldName() { } // isset id assignments + private static final int __MAX_INDEXES_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128354,50 +130794,53 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_args.class, metaDataMap); } - public get_index_by_name_args() { + public get_indexes_args() { + this.max_indexes = (short)-1; + } - public get_index_by_name_args( + public get_indexes_args( String db_name, String tbl_name, - String index_name) + short max_indexes) { this(); this.db_name = db_name; this.tbl_name = tbl_name; - this.index_name = index_name; + this.max_indexes = max_indexes; + setMax_indexesIsSet(true); } /** * Performs a deep copy on other. 
*/ - public get_index_by_name_args(get_index_by_name_args other) { + public get_indexes_args(get_indexes_args other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDb_name()) { this.db_name = other.db_name; } if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } - if (other.isSetIndex_name()) { - this.index_name = other.index_name; - } + this.max_indexes = other.max_indexes; } - public get_index_by_name_args deepCopy() { - return new get_index_by_name_args(this); + public get_indexes_args deepCopy() { + return new get_indexes_args(this); } @Override public void clear() { this.db_name = null; this.tbl_name = null; - this.index_name = null; + this.max_indexes = (short)-1; + } public String getDb_name() { @@ -128446,27 +130889,26 @@ public void setTbl_nameIsSet(boolean value) { } } - public String getIndex_name() { - return this.index_name; + public short getMax_indexes() { + return this.max_indexes; } - public void setIndex_name(String index_name) { - this.index_name = index_name; + public void setMax_indexes(short max_indexes) { + this.max_indexes = max_indexes; + setMax_indexesIsSet(true); } - public void unsetIndex_name() { - this.index_name = null; + public void unsetMax_indexes() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); } - /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ - public boolean isSetIndex_name() { - return this.index_name != null; + /** Returns true if field max_indexes is set (has been assigned a value) and false otherwise */ + public boolean isSetMax_indexes() { + return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); } - public void setIndex_nameIsSet(boolean value) { - if (!value) { - this.index_name = null; - } + public void setMax_indexesIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID, value); } public void setFieldValue(_Fields field, Object value) { @@ -128487,11 +130929,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case INDEX_NAME: + case MAX_INDEXES: if (value == null) { - unsetIndex_name(); + unsetMax_indexes(); } else { - setIndex_name((String)value); + setMax_indexes((Short)value); } break; @@ -128506,8 +130948,8 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); - case INDEX_NAME: - return getIndex_name(); + case MAX_INDEXES: + return getMax_indexes(); } throw new IllegalStateException(); @@ -128524,8 +130966,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); - case INDEX_NAME: - return isSetIndex_name(); + case MAX_INDEXES: + return isSetMax_indexes(); } throw new IllegalStateException(); } @@ -128534,12 +130976,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_by_name_args) - return this.equals((get_index_by_name_args)that); + if (that instanceof get_indexes_args) + return this.equals((get_indexes_args)that); return false; } - public boolean equals(get_index_by_name_args that) { + public boolean equals(get_indexes_args that) { if (that == null) return false; @@ -128561,12 +131003,12 @@ public boolean equals(get_index_by_name_args that) { return false; } - boolean this_present_index_name = true && this.isSetIndex_name(); - boolean that_present_index_name = true && that.isSetIndex_name(); - if (this_present_index_name || that_present_index_name) { - if 
(!(this_present_index_name && that_present_index_name)) + boolean this_present_max_indexes = true; + boolean that_present_max_indexes = true; + if (this_present_max_indexes || that_present_max_indexes) { + if (!(this_present_max_indexes && that_present_max_indexes)) return false; - if (!this.index_name.equals(that.index_name)) + if (this.max_indexes != that.max_indexes) return false; } @@ -128587,16 +131029,16 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); - boolean present_index_name = true && (isSetIndex_name()); - list.add(present_index_name); - if (present_index_name) - list.add(index_name); + boolean present_max_indexes = true; + list.add(present_max_indexes); + if (present_max_indexes) + list.add(max_indexes); return list.hashCode(); } @Override - public int compareTo(get_index_by_name_args other) { + public int compareTo(get_indexes_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -128623,12 +131065,12 @@ public int compareTo(get_index_by_name_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); + lastComparison = Boolean.valueOf(isSetMax_indexes()).compareTo(other.isSetMax_indexes()); if (lastComparison != 0) { return lastComparison; } - if (isSetIndex_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); + if (isSetMax_indexes()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_indexes, other.max_indexes); if (lastComparison != 0) { return lastComparison; } @@ -128650,7 +131092,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_by_name_args("); + StringBuilder sb = new StringBuilder("get_indexes_args("); boolean first = true; sb.append("db_name:"); @@ -128669,12 +131111,8 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("index_name:"); - if (this.index_name == null) { - sb.append("null"); - } else { - sb.append(this.index_name); - } + sb.append("max_indexes:"); + sb.append(this.max_indexes); first = false; sb.append(")"); return sb.toString(); @@ -128695,21 +131133,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
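
The TupleScheme hunks around here all follow one encoding: the writer emits a BitSet saying which optional fields follow, then only the set fields; the reader decodes the same bits to know what to expect. A plain-JDK sketch of that pattern for the get_indexes_args field set (hypothetical stand-in; the real TTupleProtocol.writeBitSet/readBitSet handle arbitrary widths):

    import java.io.*;
    import java.util.BitSet;

    public class TupleEncodingDemo {
        static void write(DataOutputStream out, String dbName, String tblName,
                          Short maxIndexes) throws IOException {
            BitSet optionals = new BitSet();
            if (dbName != null)     optionals.set(0);
            if (tblName != null)    optionals.set(1);
            if (maxIndexes != null) optionals.set(2);
            byte[] bits = optionals.toByteArray();
            out.writeByte(bits.length == 0 ? 0 : bits[0]); // 3 fields fit in one byte
            if (dbName != null)     out.writeUTF(dbName);
            if (tblName != null)    out.writeUTF(tblName);
            if (maxIndexes != null) out.writeShort(maxIndexes);
        }

        static void read(DataInputStream in) throws IOException {
            BitSet incoming = BitSet.valueOf(new byte[] { in.readByte() });
            String dbName    = incoming.get(0) ? in.readUTF() : null;
            String tblName   = incoming.get(1) ? in.readUTF() : null;
            Short maxIndexes = incoming.get(2) ? Short.valueOf(in.readShort()) : null;
            System.out.println(dbName + " " + tblName + " " + maxIndexes);
        }
    }
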
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_index_by_name_argsStandardSchemeFactory implements SchemeFactory { - public get_index_by_name_argsStandardScheme getScheme() { - return new get_index_by_name_argsStandardScheme(); + private static class get_indexes_argsStandardSchemeFactory implements SchemeFactory { + public get_indexes_argsStandardScheme getScheme() { + return new get_indexes_argsStandardScheme(); } } - private static class get_index_by_name_argsStandardScheme extends StandardScheme { + private static class get_indexes_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -128735,10 +131175,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_a org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // INDEX_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.index_name = iprot.readString(); - struct.setIndex_nameIsSet(true); + case 3: // MAX_INDEXES + if (schemeField.type == org.apache.thrift.protocol.TType.I16) { + struct.max_indexes = iprot.readI16(); + struct.setMax_indexesIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -128752,7 +131192,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -128766,27 +131206,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_ oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } - if (struct.index_name != null) { - oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); - oprot.writeString(struct.index_name); - oprot.writeFieldEnd(); - } + oprot.writeFieldBegin(MAX_INDEXES_FIELD_DESC); + oprot.writeI16(struct.max_indexes); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_index_by_name_argsTupleSchemeFactory implements SchemeFactory { - public get_index_by_name_argsTupleScheme getScheme() { - return new get_index_by_name_argsTupleScheme(); + private static class get_indexes_argsTupleSchemeFactory implements SchemeFactory { + public get_indexes_argsTupleScheme getScheme() { + return new get_indexes_argsTupleScheme(); } } - private static class get_index_by_name_argsTupleScheme extends TupleScheme { + private static class get_indexes_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if 
(struct.isSetDb_name()) { @@ -128795,7 +131233,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_a if (struct.isSetTbl_name()) { optionals.set(1); } - if (struct.isSetIndex_name()) { + if (struct.isSetMax_indexes()) { optionals.set(2); } oprot.writeBitSet(optionals, 3); @@ -128805,13 +131243,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_a if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } - if (struct.isSetIndex_name()) { - oprot.writeString(struct.index_name); + if (struct.isSetMax_indexes()) { + oprot.writeI16(struct.max_indexes); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { @@ -128823,30 +131261,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_ar struct.setTbl_nameIsSet(true); } if (incoming.get(2)) { - struct.index_name = iprot.readString(); - struct.setIndex_nameIsSet(true); + struct.max_indexes = iprot.readI16(); + struct.setMax_indexesIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_indexes_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_by_name_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_by_name_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_indexes_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_indexes_resultTupleSchemeFactory()); } - private Index success; // required - private MetaException o1; // required - private NoSuchObjectException o2; // required + private List success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding 
and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -128917,22 +131355,23 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class))); + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_result.class, metaDataMap); } - public get_index_by_name_result() { + public get_indexes_result() { } - public get_index_by_name_result( - Index success, - MetaException o1, - NoSuchObjectException o2) + public get_indexes_result( + List success, + NoSuchObjectException o1, + MetaException o2) { this(); this.success = success; @@ -128943,20 +131382,24 @@ public get_index_by_name_result( /** * Performs a deep copy on other. */ - public get_index_by_name_result(get_index_by_name_result other) { + public get_indexes_result(get_indexes_result other) { if (other.isSetSuccess()) { - this.success = new Index(other.success); + List __this__success = new ArrayList(other.success.size()); + for (Index other_element : other.success) { + __this__success.add(new Index(other_element)); + } + this.success = __this__success; } if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { - this.o2 = new NoSuchObjectException(other.o2); + this.o2 = new MetaException(other.o2); } } - public get_index_by_name_result deepCopy() { - return new get_index_by_name_result(this); + public get_indexes_result deepCopy() { + return new get_indexes_result(this); } @Override @@ -128966,11 +131409,26 @@ public void clear() { this.o2 = null; } - public Index getSuccess() { + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(Index elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { return this.success; } - public void setSuccess(Index success) { + public void setSuccess(List success) { this.success = success; } @@ -128989,11 +131447,11 @@ public void setSuccessIsSet(boolean value) { } } - public MetaException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(MetaException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -129012,11 +131470,11 @@ public void setO1IsSet(boolean value) { } } - public NoSuchObjectException getO2() { + public MetaException getO2() { return this.o2; } - public void setO2(NoSuchObjectException o2) { + public void setO2(MetaException o2) { this.o2 = o2; } @@ -129041,7 +131499,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Index)value); + setSuccess((List)value); } break; @@ -129049,7 +131507,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((MetaException)value); + setO1((NoSuchObjectException)value); } break; @@ -129057,7 +131515,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((NoSuchObjectException)value); + setO2((MetaException)value); } break; @@ -129100,12 +131558,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_by_name_result) - return this.equals((get_index_by_name_result)that); + if (that instanceof get_indexes_result) + return this.equals((get_indexes_result)that); return false; } - public boolean equals(get_index_by_name_result that) { + public boolean equals(get_indexes_result that) { if (that == null) return false; @@ -129162,7 +131620,7 @@ public int hashCode() { } @Override - public int compareTo(get_index_by_name_result other) { + public int compareTo(get_indexes_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -129216,7 +131674,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_by_name_result("); + StringBuilder sb = new StringBuilder("get_indexes_result("); boolean first = true; sb.append("success:"); @@ -129249,9 +131707,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -129270,15 +131725,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_index_by_name_resultStandardSchemeFactory implements SchemeFactory { - public get_index_by_name_resultStandardScheme getScheme() { - return new get_index_by_name_resultStandardScheme(); + private static class get_indexes_resultStandardSchemeFactory implements SchemeFactory { + public get_indexes_resultStandardScheme getScheme() { + return new get_indexes_resultStandardScheme(); } } - private static class get_index_by_name_resultStandardScheme extends StandardScheme { + private static class get_indexes_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -129289,9 +131744,19 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_r } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new Index(); - struct.success.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.success = new ArrayList(_list1368.size); + Index _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) + { + _elem1369 = new Index(); + _elem1369.read(iprot); + struct.success.add(_elem1369); + } + iprot.readListEnd(); + } struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -129299,7 +131764,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_r break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -129308,7 +131773,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_r break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -129324,13 +131789,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - 
struct.success.write(oprot); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (Index _iter1371 : struct.success) + { + _iter1371.write(oprot); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -129349,16 +131821,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_ } - private static class get_index_by_name_resultTupleSchemeFactory implements SchemeFactory { - public get_index_by_name_resultTupleScheme getScheme() { - return new get_index_by_name_resultTupleScheme(); + private static class get_indexes_resultTupleSchemeFactory implements SchemeFactory { + public get_indexes_resultTupleScheme getScheme() { + return new get_indexes_resultTupleScheme(); } } - private static class get_index_by_name_resultTupleScheme extends TupleScheme { + private static class get_indexes_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -129372,7 +131844,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_r } oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { - struct.success.write(oprot); + { + oprot.writeI32(struct.success.size()); + for (Index _iter1372 : struct.success) + { + _iter1372.write(oprot); + } + } } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -129383,21 +131861,30 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new Index(); - struct.success.read(iprot); + { + org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1373.size); + Index _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) + { + _elem1374 = new Index(); + _elem1374.read(iprot); + struct.success.add(_elem1374); + } + } struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -129406,8 +131893,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_re } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_indexes_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_names_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); @@ -129415,8 +131902,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_re private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_indexes_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_indexes_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_index_names_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_names_argsTupleSchemeFactory()); } private String db_name; // required @@ -129500,15 +131987,15 @@ public String getFieldName() { tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_args.class, metaDataMap); } - public get_indexes_args() { + public get_index_names_args() { this.max_indexes = (short)-1; } - public get_indexes_args( + public get_index_names_args( String db_name, String tbl_name, short max_indexes) @@ -129523,7 +132010,7 @@ public get_indexes_args( /** * Performs a deep copy on other. */ - public get_indexes_args(get_indexes_args other) { + public get_index_names_args(get_index_names_args other) { __isset_bitfield = other.__isset_bitfield; if (other.isSetDb_name()) { this.db_name = other.db_name; @@ -129534,8 +132021,8 @@ public get_indexes_args(get_indexes_args other) { this.max_indexes = other.max_indexes; } - public get_indexes_args deepCopy() { - return new get_indexes_args(this); + public get_index_names_args deepCopy() { + return new get_index_names_args(this); } @Override @@ -129679,12 +132166,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_indexes_args) - return this.equals((get_indexes_args)that); + if (that instanceof get_index_names_args) + return this.equals((get_index_names_args)that); return false; } - public boolean equals(get_indexes_args that) { + public boolean equals(get_index_names_args that) { if (that == null) return false; @@ -129741,7 +132228,7 @@ public int hashCode() { } @Override - public int compareTo(get_indexes_args other) { + public int compareTo(get_index_names_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -129795,7 +132282,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_indexes_args("); + StringBuilder sb = new StringBuilder("get_index_names_args("); boolean first = true; sb.append("db_name:"); @@ -129844,15 +132331,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_indexes_argsStandardSchemeFactory implements SchemeFactory { - public get_indexes_argsStandardScheme getScheme() { - return new get_indexes_argsStandardScheme(); + private static class get_index_names_argsStandardSchemeFactory implements SchemeFactory { + public get_index_names_argsStandardScheme getScheme() { + return new get_index_names_argsStandardScheme(); } } - private static class get_indexes_argsStandardScheme extends StandardScheme { + private static class get_index_names_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -129895,7 +132382,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -129918,16 +132405,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_args s } - private static class get_indexes_argsTupleSchemeFactory implements SchemeFactory { - public get_indexes_argsTupleScheme getScheme() { - return new get_indexes_argsTupleScheme(); + private static class get_index_names_argsTupleSchemeFactory implements SchemeFactory { + public get_index_names_argsTupleScheme getScheme() { + return new get_index_names_argsTupleScheme(); } } - private static class get_indexes_argsTupleScheme extends TupleScheme { + private static class get_index_names_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -129952,7 +132439,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_args st } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { @@ -129972,28 +132459,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args str } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_indexes_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_names_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_indexes_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_indexes_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_index_names_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_names_resultTupleSchemeFactory()); } - private List success; // required - private NoSuchObjectException o1; // required + private List success; // required private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
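/*
 * Aside (not part of the generated diff): every _Fields enum in this file follows the
 * same org.apache.thrift.TFieldIdEnum contract -- a short wire id paired with a field
 * name, plus a findByThriftId lookup used by the scheme readers below. A minimal,
 * self-contained sketch of that contract; the constant names are illustrative, not
 * taken from the IDL.
 */
import org.apache.thrift.TFieldIdEnum;

enum ExampleFields implements TFieldIdEnum {
  SUCCESS((short)0, "success"),
  O2((short)1, "o2");

  private final short thriftId;
  private final String fieldName;

  ExampleFields(short thriftId, String fieldName) {
    this.thriftId = thriftId;
    this.fieldName = fieldName;
  }

  /** Mirrors the generated lookup: unknown ids map to null so readers can skip them. */
  static ExampleFields findByThriftId(int fieldId) {
    switch (fieldId) {
      case 0: return SUCCESS;
      case 1: return O2;
      default: return null;
    }
  }

  public short getThriftFieldId() { return thriftId; }
  public String getFieldName() { return fieldName; }
}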
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)1, "o2"); private static final Map byName = new HashMap(); @@ -130010,9 +132494,7 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; - case 1: // O1 - return O1; - case 2: // O2 + case 1: // O2 return O2; default: return null; @@ -130059,56 +132541,45 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class)))); - tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_result.class, metaDataMap); } - public get_indexes_result() { + public get_index_names_result() { } - public get_indexes_result( - List success, - NoSuchObjectException o1, + public get_index_names_result( + List success, MetaException o2) { this(); this.success = success; - this.o1 = o1; this.o2 = o2; } /** * Performs a deep copy on other. */ - public get_indexes_result(get_indexes_result other) { + public get_index_names_result(get_index_names_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (Index other_element : other.success) { - __this__success.add(new Index(other_element)); - } + List __this__success = new ArrayList(other.success); this.success = __this__success; } - if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); - } if (other.isSetO2()) { this.o2 = new MetaException(other.o2); } } - public get_indexes_result deepCopy() { - return new get_indexes_result(this); + public get_index_names_result deepCopy() { + return new get_index_names_result(this); } @Override public void clear() { this.success = null; - this.o1 = null; this.o2 = null; } @@ -130116,22 +132587,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? 
null : this.success.iterator(); } - public void addToSuccess(Index elem) { + public void addToSuccess(String elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -130150,29 +132621,6 @@ public void setSuccessIsSet(boolean value) { } } - public NoSuchObjectException getO1() { - return this.o1; - } - - public void setO1(NoSuchObjectException o1) { - this.o1 = o1; - } - - public void unsetO1() { - this.o1 = null; - } - - /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ - public boolean isSetO1() { - return this.o1 != null; - } - - public void setO1IsSet(boolean value) { - if (!value) { - this.o1 = null; - } - } - public MetaException getO2() { return this.o2; } @@ -130202,15 +132650,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); - } - break; - - case O1: - if (value == null) { - unsetO1(); - } else { - setO1((NoSuchObjectException)value); + setSuccess((List)value); } break; @@ -130230,9 +132670,6 @@ public Object getFieldValue(_Fields field) { case SUCCESS: return getSuccess(); - case O1: - return getO1(); - case O2: return getO2(); @@ -130249,8 +132686,6 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return isSetSuccess(); - case O1: - return isSetO1(); case O2: return isSetO2(); } @@ -130261,12 +132696,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_indexes_result) - return this.equals((get_indexes_result)that); + if (that instanceof get_index_names_result) + return this.equals((get_index_names_result)that); return false; } - public boolean equals(get_indexes_result that) { + public boolean equals(get_index_names_result that) { if (that == null) return false; @@ -130279,15 +132714,6 @@ public boolean equals(get_indexes_result that) { return false; } - boolean this_present_o1 = true && this.isSetO1(); - boolean that_present_o1 = true && that.isSetO1(); - if (this_present_o1 || that_present_o1) { - if (!(this_present_o1 && that_present_o1)) - return false; - if (!this.o1.equals(that.o1)) - return false; - } - boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -130309,11 +132735,6 @@ public int hashCode() { if (present_success) list.add(success); - boolean present_o1 = true && (isSetO1()); - list.add(present_o1); - if (present_o1) - list.add(o1); - boolean present_o2 = true && (isSetO2()); list.add(present_o2); if (present_o2) @@ -130323,7 +132744,7 @@ public int hashCode() { } @Override - public int compareTo(get_indexes_result other) { + public int compareTo(get_index_names_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -130340,16 +132761,6 @@ public int compareTo(get_indexes_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO1()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); - if (lastComparison != 0) { - return lastComparison; - } - } 
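/*
 * Aside: the compareTo removals above keep the generated ordering convention intact --
 * each field compares first on "is set" (unset sorts before set), then on value via
 * TBaseHelper. A minimal sketch of that two-step comparison for one field; the class
 * and field names are illustrative.
 */
import org.apache.thrift.TBaseHelper;

final class OrderingSketch implements Comparable<OrderingSketch> {
  String o2; // stands in for any single nullable struct field

  @Override
  public int compareTo(OrderingSketch other) {
    // Unset-before-set, exactly like Boolean.valueOf(isSetO2()).compareTo(...) above.
    int lastComparison = Boolean.valueOf(o2 != null).compareTo(other.o2 != null);
    if (lastComparison != 0) {
      return lastComparison;
    }
    if (o2 != null) {
      lastComparison = TBaseHelper.compareTo(this.o2, other.o2);
      if (lastComparison != 0) {
        return lastComparison;
      }
    }
    return 0;
  }
}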
lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); if (lastComparison != 0) { return lastComparison; @@ -130377,7 +132788,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_indexes_result("); + StringBuilder sb = new StringBuilder("get_index_names_result("); boolean first = true; sb.append("success:"); @@ -130388,14 +132799,6 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -130428,15 +132831,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_indexes_resultStandardSchemeFactory implements SchemeFactory { - public get_indexes_resultStandardScheme getScheme() { - return new get_indexes_resultStandardScheme(); + private static class get_index_names_resultStandardSchemeFactory implements SchemeFactory { + public get_index_names_resultStandardScheme getScheme() { + return new get_index_names_resultStandardScheme(); } } - private static class get_indexes_resultStandardScheme extends StandardScheme { + private static class get_index_names_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -130449,14 +132852,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); - struct.success = new ArrayList(_list1344.size); - Index _elem1345; - for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) + org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); + struct.success = new ArrayList(_list1376.size); + String _elem1377; + for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) { - _elem1345 = new Index(); - _elem1345.read(iprot); - struct.success.add(_elem1345); + _elem1377 = iprot.readString(); + struct.success.add(_elem1377); } iprot.readListEnd(); } @@ -130465,16 +132867,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 1: // O1 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // O2 + case 1: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.o2 = new MetaException(); struct.o2.read(iprot); @@ -130492,27 +132885,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_result struct) throws 
org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter1347 : struct.success) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter1379 : struct.success) { - _iter1347.write(oprot); + oprot.writeString(_iter1379); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } - if (struct.o1 != null) { - oprot.writeFieldBegin(O1_FIELD_DESC); - struct.o1.write(oprot); - oprot.writeFieldEnd(); - } if (struct.o2 != null) { oprot.writeFieldBegin(O2_FIELD_DESC); struct.o2.write(oprot); @@ -130524,69 +132912,57 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result } - private static class get_indexes_resultTupleSchemeFactory implements SchemeFactory { - public get_indexes_resultTupleScheme getScheme() { - return new get_indexes_resultTupleScheme(); + private static class get_index_names_resultTupleSchemeFactory implements SchemeFactory { + public get_index_names_resultTupleScheme getScheme() { + return new get_index_names_resultTupleScheme(); } } - private static class get_indexes_resultTupleScheme extends TupleScheme { + private static class get_index_names_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO1()) { - optionals.set(1); - } if (struct.isSetO2()) { - optionals.set(2); + optionals.set(1); } - oprot.writeBitSet(optionals, 3); + oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter1348 : struct.success) + for (String _iter1380 : struct.success) { - _iter1348.write(oprot); + oprot.writeString(_iter1380); } } } - if (struct.isSetO1()) { - struct.o1.write(oprot); - } if (struct.isSetO2()) { struct.o2.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1349.size); - Index _elem1350; - for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) + org.apache.thrift.protocol.TList _list1381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1381.size); + String _elem1382; + for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) { - _elem1350 = new Index(); - _elem1350.read(iprot); - struct.success.add(_elem1350); + _elem1382 = iprot.readString(); + struct.success.add(_elem1382); } } struct.setSuccessIsSet(true); } if (incoming.get(1)) 
{ - struct.o1 = new NoSuchObjectException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } - if (incoming.get(2)) { struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); @@ -130596,28 +132972,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_names_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_primary_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_primary_keys_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField MAX_INDEXES_FIELD_DESC = new org.apache.thrift.protocol.TField("max_indexes", org.apache.thrift.protocol.TType.I16, (short)3); + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_names_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_names_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_primary_keys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_primary_keys_argsTupleSchemeFactory()); } - private String db_name; // required - private String tbl_name; // required - private short max_indexes; // required + private PrimaryKeysRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
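/*
 * Aside: the _list1376/_iter1379 blocks above are the stock TProtocol framing for a
 * LIST<STRING> field. Self-contained sketch of both directions under the standard
 * scheme (names illustrative); the tuple scheme seen above writes only the element
 * count via writeI32 and omits the readListEnd/writeListEnd calls.
 */
import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

final class ListFramingSketch {
  /** Writes the TList header (element type + count), the elements, then the end marker. */
  static void writeNames(TProtocol oprot, List<String> names) throws TException {
    oprot.writeListBegin(new TList(TType.STRING, names.size()));
    for (String name : names) {
      oprot.writeString(name);
    }
    oprot.writeListEnd();
  }

  /** Reads the header back and sizes the ArrayList from it, as the generated code does. */
  static List<String> readNames(TProtocol iprot) throws TException {
    TList header = iprot.readListBegin();
    List<String> names = new ArrayList<String>(header.size);
    for (int i = 0; i < header.size; ++i) {
      names.add(iprot.readString());
    }
    iprot.readListEnd();
    return names;
  }
}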
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - MAX_INDEXES((short)3, "max_indexes"); + REQUEST((short)1, "request"); private static final Map byName = new HashMap(); @@ -130632,12 +133002,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TBL_NAME - return TBL_NAME; - case 3: // MAX_INDEXES - return MAX_INDEXES; + case 1: // REQUEST + return REQUEST; default: return null; } @@ -130678,155 +133044,73 @@ public String getFieldName() { } // isset id assignments - private static final int __MAX_INDEXES_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrimaryKeysRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_primary_keys_args.class, metaDataMap); } - public get_index_names_args() { - this.max_indexes = (short)-1; - + public get_primary_keys_args() { } - public get_index_names_args( - String db_name, - String tbl_name, - short max_indexes) + public get_primary_keys_args( + PrimaryKeysRequest request) { this(); - this.db_name = db_name; - this.tbl_name = tbl_name; - this.max_indexes = max_indexes; - setMax_indexesIsSet(true); + this.request = request; } /** * Performs a deep copy on other. 
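/*
 * Aside: deepCopy() in every struct here just delegates to a copy constructor that
 * clones each field that is set -- nested structs get a fresh instance, as
 * PrimaryKeysRequest does above. Stripped-down sketch of the convention with a
 * hypothetical request type (not from the IDL):
 */
final class ArgsSketch {
  private RequestSketch request; // mirrors the single "// required" wrapper field

  ArgsSketch() {}

  /** Copy constructor: clone the nested struct only when set, never share it. */
  ArgsSketch(ArgsSketch other) {
    if (other.request != null) {
      this.request = new RequestSketch(other.request);
    }
  }

  ArgsSketch deepCopy() {
    return new ArgsSketch(this);
  }

  static final class RequestSketch {
    String dbName;
    String tblName;
    RequestSketch() {}
    RequestSketch(RequestSketch other) {
      this.dbName = other.dbName;
      this.tblName = other.tblName;
    }
  }
}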
*/ - public get_index_names_args(get_index_names_args other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetDb_name()) { - this.db_name = other.db_name; - } - if (other.isSetTbl_name()) { - this.tbl_name = other.tbl_name; + public get_primary_keys_args(get_primary_keys_args other) { + if (other.isSetRequest()) { + this.request = new PrimaryKeysRequest(other.request); } - this.max_indexes = other.max_indexes; } - public get_index_names_args deepCopy() { - return new get_index_names_args(this); + public get_primary_keys_args deepCopy() { + return new get_primary_keys_args(this); } @Override public void clear() { - this.db_name = null; - this.tbl_name = null; - this.max_indexes = (short)-1; - - } - - public String getDb_name() { - return this.db_name; - } - - public void setDb_name(String db_name) { - this.db_name = db_name; - } - - public void unsetDb_name() { - this.db_name = null; - } - - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; - } - - public void setDb_nameIsSet(boolean value) { - if (!value) { - this.db_name = null; - } + this.request = null; } - public String getTbl_name() { - return this.tbl_name; + public PrimaryKeysRequest getRequest() { + return this.request; } - public void setTbl_name(String tbl_name) { - this.tbl_name = tbl_name; + public void setRequest(PrimaryKeysRequest request) { + this.request = request; } - public void unsetTbl_name() { - this.tbl_name = null; + public void unsetRequest() { + this.request = null; } - /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_name() { - return this.tbl_name != null; + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; } - public void setTbl_nameIsSet(boolean value) { + public void setRequestIsSet(boolean value) { if (!value) { - this.tbl_name = null; + this.request = null; } } - public short getMax_indexes() { - return this.max_indexes; - } - - public void setMax_indexes(short max_indexes) { - this.max_indexes = max_indexes; - setMax_indexesIsSet(true); - } - - public void unsetMax_indexes() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); - } - - /** Returns true if field max_indexes is set (has been assigned a value) and false otherwise */ - public boolean isSetMax_indexes() { - return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); - } - - public void setMax_indexesIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: - if (value == null) { - unsetDb_name(); - } else { - setDb_name((String)value); - } - break; - - case TBL_NAME: - if (value == null) { - unsetTbl_name(); - } else { - setTbl_name((String)value); - } - break; - - case MAX_INDEXES: + case REQUEST: if (value == null) { - unsetMax_indexes(); + unsetRequest(); } else { - setMax_indexes((Short)value); + setRequest((PrimaryKeysRequest)value); } break; @@ -130835,14 +133119,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); - - case TBL_NAME: - return getTbl_name(); - - case MAX_INDEXES: - return getMax_indexes(); + case REQUEST: + return 
getRequest(); } throw new IllegalStateException(); @@ -130855,12 +133133,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); - case TBL_NAME: - return isSetTbl_name(); - case MAX_INDEXES: - return isSetMax_indexes(); + case REQUEST: + return isSetRequest(); } throw new IllegalStateException(); } @@ -130869,39 +133143,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_names_args) - return this.equals((get_index_names_args)that); + if (that instanceof get_primary_keys_args) + return this.equals((get_primary_keys_args)that); return false; } - public boolean equals(get_index_names_args that) { + public boolean equals(get_primary_keys_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) - return false; - if (!this.db_name.equals(that.db_name)) - return false; - } - - boolean this_present_tbl_name = true && this.isSetTbl_name(); - boolean that_present_tbl_name = true && that.isSetTbl_name(); - if (this_present_tbl_name || that_present_tbl_name) { - if (!(this_present_tbl_name && that_present_tbl_name)) - return false; - if (!this.tbl_name.equals(that.tbl_name)) - return false; - } - - boolean this_present_max_indexes = true; - boolean that_present_max_indexes = true; - if (this_present_max_indexes || that_present_max_indexes) { - if (!(this_present_max_indexes && that_present_max_indexes)) + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) return false; - if (this.max_indexes != that.max_indexes) + if (!this.request.equals(that.request)) return false; } @@ -130912,58 +133168,28 @@ public boolean equals(get_index_names_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); - - boolean present_tbl_name = true && (isSetTbl_name()); - list.add(present_tbl_name); - if (present_tbl_name) - list.add(tbl_name); - - boolean present_max_indexes = true; - list.add(present_max_indexes); - if (present_max_indexes) - list.add(max_indexes); + boolean present_request = true && (isSetRequest()); + list.add(present_request); + if (present_request) + list.add(request); return list.hashCode(); } @Override - public int compareTo(get_index_names_args other) { + public int compareTo(get_primary_keys_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTbl_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); - if (lastComparison != 0) { 
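/*
 * Aside: the equals(...) bodies above all use the generated "present" idiom -- a field
 * pair matches when both sides are unset, or both are set with equal values. Sketch of
 * the idiom for one nullable field (names illustrative):
 */
final class PresentEqualsSketch {
  String request;

  boolean fieldEquals(PresentEqualsSketch that) {
    boolean thisPresent = this.request != null;
    boolean thatPresent = that.request != null;
    if (thisPresent || thatPresent) {
      if (!(thisPresent && thatPresent)) {
        return false; // only one side set
      }
      if (!this.request.equals(that.request)) {
        return false; // both set, values differ
      }
    }
    return true; // both unset, or both set and equal
  }
}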
- return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetMax_indexes()).compareTo(other.isSetMax_indexes()); + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(other.isSetRequest()); if (lastComparison != 0) { return lastComparison; } - if (isSetMax_indexes()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_indexes, other.max_indexes); + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, other.request); if (lastComparison != 0) { return lastComparison; } @@ -130985,28 +133211,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_names_args("); + StringBuilder sb = new StringBuilder("get_primary_keys_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { - sb.append("null"); - } else { - sb.append(this.db_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("tbl_name:"); - if (this.tbl_name == null) { + sb.append("request:"); + if (this.request == null) { sb.append("null"); } else { - sb.append(this.tbl_name); + sb.append(this.request); } first = false; - if (!first) sb.append(", "); - sb.append("max_indexes:"); - sb.append(this.max_indexes); - first = false; sb.append(")"); return sb.toString(); } @@ -131014,6 +133228,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (request != null) { + request.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -131026,23 +133243,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
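/*
 * Aside: the "java serialization is wacky" comment above marks the generated
 * readObject/writeObject hooks, which route Java serialization through Thrift itself:
 * the struct is re-encoded with TCompactProtocol over a TIOStreamTransport wrapped
 * around the object stream. Minimal sketch of that bridge for any TBase struct:
 */
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.apache.thrift.TBase;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

final class SerializationBridgeSketch {
  /** Mirrors the generated writeObject body. */
  static void writeStruct(ObjectOutputStream out, TBase<?, ?> struct) throws IOException {
    try {
      struct.write(new TCompactProtocol(new TIOStreamTransport(out)));
    } catch (TException te) {
      throw new IOException(te);
    }
  }

  /** Mirrors the generated readObject body (minus the isset-bitfield reset, which only
      structs with primitive fields need -- hence its removal in this hunk). */
  static void readStruct(ObjectInputStream in, TBase<?, ?> struct) throws IOException {
    try {
      struct.read(new TCompactProtocol(new TIOStreamTransport(in)));
    } catch (TException te) {
      throw new IOException(te);
    }
  }
}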
- __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_index_names_argsStandardSchemeFactory implements SchemeFactory { - public get_index_names_argsStandardScheme getScheme() { - return new get_index_names_argsStandardScheme(); + private static class get_primary_keys_argsStandardSchemeFactory implements SchemeFactory { + public get_primary_keys_argsStandardScheme getScheme() { + return new get_primary_keys_argsStandardScheme(); } } - private static class get_index_names_argsStandardScheme extends StandardScheme { + private static class get_primary_keys_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -131052,26 +133267,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_arg break; } switch (schemeField.id) { - case 1: // DB_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TBL_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // MAX_INDEXES - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.max_indexes = iprot.readI16(); - struct.setMax_indexesIsSet(true); + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new PrimaryKeysRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -131085,102 +133285,78 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); - oprot.writeFieldEnd(); - } - if (struct.tbl_name != null) { - oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); - oprot.writeString(struct.tbl_name); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(MAX_INDEXES_FIELD_DESC); - oprot.writeI16(struct.max_indexes); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_index_names_argsTupleSchemeFactory implements SchemeFactory { - public get_index_names_argsTupleScheme getScheme() { - return new get_index_names_argsTupleScheme(); + private static class get_primary_keys_argsTupleSchemeFactory implements SchemeFactory { + public 
get_primary_keys_argsTupleScheme getScheme() { + return new get_primary_keys_argsTupleScheme(); } } - private static class get_index_names_argsTupleScheme extends TupleScheme { + private static class get_primary_keys_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetRequest()) { optionals.set(0); } - if (struct.isSetTbl_name()) { - optionals.set(1); - } - if (struct.isSetMax_indexes()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); - } - if (struct.isSetTbl_name()) { - oprot.writeString(struct.tbl_name); - } - if (struct.isSetMax_indexes()) { - oprot.writeI16(struct.max_indexes); + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); - } - if (incoming.get(1)) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); - } - if (incoming.get(2)) { - struct.max_indexes = iprot.readI16(); - struct.setMax_indexesIsSet(true); + struct.request = new PrimaryKeysRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_index_names_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_primary_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_primary_keys_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", 
org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_names_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_names_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_primary_keys_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_primary_keys_resultTupleSchemeFactory()); } - private List success; // required - private MetaException o2; // required + private PrimaryKeysResponse success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O2((short)1, "o2"); + O1((short)1, "o1"), + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -131197,7 +133373,9 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; - case 1: // O2 + case 1: // O1 + return O1; + case 2: // O2 return O2; default: return null; @@ -131243,69 +133421,60 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrimaryKeysResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_primary_keys_result.class, metaDataMap); } - public get_index_names_result() { + public get_primary_keys_result() { } - public get_index_names_result( - List success, - MetaException o2) + public get_primary_keys_result( + PrimaryKeysResponse success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; + this.o1 = o1; this.o2 = o2; } /** * Performs a deep copy on other. 
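/*
 * Aside: a *_result struct behaves like a tagged union -- at most one of success, o1,
 * o2 is set, and the service client rethrows whichever exception slot came back. The
 * helper below is hypothetical but mirrors what a generated recv_get_primary_keys
 * does with the fields defined above:
 */
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;

final class ResultUnwrapSketch {
  static PrimaryKeysResponse unwrap(PrimaryKeysResponse success,
                                    MetaException o1,
                                    NoSuchObjectException o2) throws TException {
    if (success != null) {
      return success;            // field id 0
    }
    if (o1 != null) {
      throw o1;                  // MetaException, field id 1
    }
    if (o2 != null) {
      throw o2;                  // NoSuchObjectException, field id 2
    }
    throw new TApplicationException(TApplicationException.MISSING_RESULT,
        "get_primary_keys failed: unknown result");
  }
}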
*/ - public get_index_names_result(get_index_names_result other) { + public get_primary_keys_result(get_primary_keys_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success); - this.success = __this__success; + this.success = new PrimaryKeysResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } - public get_index_names_result deepCopy() { - return new get_index_names_result(this); + public get_primary_keys_result deepCopy() { + return new get_primary_keys_result(this); } @Override public void clear() { this.success = null; + this.o1 = null; this.o2 = null; } - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? null : this.success.iterator(); - } - - public void addToSuccess(String elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { + public PrimaryKeysResponse getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(PrimaryKeysResponse success) { this.success = success; } @@ -131324,11 +133493,34 @@ public void setSuccessIsSet(boolean value) { } } - public MetaException getO2() { + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -131353,7 +133545,15 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((PrimaryKeysResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); } break; @@ -131361,7 +133561,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -131373,6 +133573,9 @@ public Object getFieldValue(_Fields field) { case SUCCESS: return getSuccess(); + case O1: + return getO1(); + case O2: return getO2(); @@ -131389,6 +133592,8 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); } @@ -131399,12 +133604,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_names_result) - return this.equals((get_index_names_result)that); + if (that instanceof get_primary_keys_result) + return this.equals((get_primary_keys_result)that); return false; } - public boolean equals(get_index_names_result that) { + public boolean equals(get_primary_keys_result that) { if (that == null) return false; @@ -131417,6 +133622,15 @@ public boolean equals(get_index_names_result that) { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && 
that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -131438,6 +133652,11 @@ public int hashCode() { if (present_success) list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + boolean present_o2 = true && (isSetO2()); list.add(present_o2); if (present_o2) @@ -131447,7 +133666,7 @@ public int hashCode() { } @Override - public int compareTo(get_index_names_result other) { + public int compareTo(get_primary_keys_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -131464,6 +133683,16 @@ public int compareTo(get_index_names_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); if (lastComparison != 0) { return lastComparison; @@ -131491,7 +133720,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_names_result("); + StringBuilder sb = new StringBuilder("get_primary_keys_result("); boolean first = true; sb.append("success:"); @@ -131502,6 +133731,14 @@ public String toString() { } first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -131516,6 +133753,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -131534,15 +133774,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_index_names_resultStandardSchemeFactory implements SchemeFactory { - public get_index_names_resultStandardScheme getScheme() { - return new get_index_names_resultStandardScheme(); + private static class get_primary_keys_resultStandardSchemeFactory implements SchemeFactory { + public get_primary_keys_resultStandardScheme getScheme() { + return new get_primary_keys_resultStandardScheme(); } } - private static class get_index_names_resultStandardScheme extends StandardScheme { + private static class get_primary_keys_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -131553,26 +133793,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res } switch (schemeField.id) { case 0: // 
SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); - struct.success = new ArrayList(_list1352.size); - String _elem1353; - for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) - { - _elem1353 = iprot.readString(); - struct.success.add(_elem1353); - } - iprot.readListEnd(); - } + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new PrimaryKeysResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 1: // O2 + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -131588,20 +133828,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1355 : struct.success) - { - oprot.writeString(_iter1355); - } - oprot.writeListEnd(); - } + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); oprot.writeFieldEnd(); } if (struct.o2 != null) { @@ -131615,33 +133853,33 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re } - private static class get_index_names_resultTupleSchemeFactory implements SchemeFactory { - public get_index_names_resultTupleScheme getScheme() { - return new get_index_names_resultTupleScheme(); + private static class get_primary_keys_resultTupleSchemeFactory implements SchemeFactory { + public get_primary_keys_resultTupleScheme getScheme() { + return new get_primary_keys_resultTupleScheme(); } } - private static class get_index_names_resultTupleScheme extends TupleScheme { + private static class get_primary_keys_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (String _iter1356 : struct.success) - { - oprot.writeString(_iter1356); - } - } + 
struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); } if (struct.isSetO2()) { struct.o2.write(oprot); @@ -131649,24 +133887,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1357.size); - String _elem1358; - for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) - { - _elem1358 = iprot.readString(); - struct.success.add(_elem1358); - } - } + struct.success = new PrimaryKeysResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o2 = new MetaException(); + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -131675,18 +133910,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_primary_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_primary_keys_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_foreign_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_foreign_keys_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_primary_keys_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_primary_keys_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_foreign_keys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_foreign_keys_argsTupleSchemeFactory()); } - private PrimaryKeysRequest request; // required + private ForeignKeysRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
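/*
 * Aside: get_foreign_keys adopts the same request-object convention introduced for
 * get_primary_keys above -- one STRUCT argument instead of loose scalars, so new
 * filters can be added later without changing the wire signature. Sketch of building
 * the request; the four-string constructor shape is assumed from the metastore IDL
 * and should be treated as illustrative only.
 */
import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;

final class ForeignKeysCallSketch {
  static ForeignKeysRequest between(String parentDb, String parentTbl,
                                    String foreignDb, String foreignTbl) {
    // Assumed constructor order: (parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name).
    return new ForeignKeysRequest(parentDb, parentTbl, foreignDb, foreignTbl);
  }
}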
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -131751,16 +133986,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrimaryKeysRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ForeignKeysRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_primary_keys_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_foreign_keys_args.class, metaDataMap);
     }

-    public get_primary_keys_args() {
+    public get_foreign_keys_args() {
     }

-    public get_primary_keys_args(
-      PrimaryKeysRequest request)
+    public get_foreign_keys_args(
+      ForeignKeysRequest request)
     {
       this();
       this.request = request;
@@ -131769,14 +134004,14 @@ public get_primary_keys_args(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_primary_keys_args(get_primary_keys_args other) {
+    public get_foreign_keys_args(get_foreign_keys_args other) {
       if (other.isSetRequest()) {
-        this.request = new PrimaryKeysRequest(other.request);
+        this.request = new ForeignKeysRequest(other.request);
       }
     }

-    public get_primary_keys_args deepCopy() {
-      return new get_primary_keys_args(this);
+    public get_foreign_keys_args deepCopy() {
+      return new get_foreign_keys_args(this);
     }

     @Override
@@ -131784,11 +134019,11 @@ public void clear() {
       this.request = null;
     }

-    public PrimaryKeysRequest getRequest() {
+    public ForeignKeysRequest getRequest() {
       return this.request;
     }

-    public void setRequest(PrimaryKeysRequest request) {
+    public void setRequest(ForeignKeysRequest request) {
       this.request = request;
     }

@@ -131813,7 +134048,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetRequest();
         } else {
-          setRequest((PrimaryKeysRequest)value);
+          setRequest((ForeignKeysRequest)value);
         }
         break;

@@ -131846,12 +134081,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_primary_keys_args)
-        return this.equals((get_primary_keys_args)that);
+      if (that instanceof get_foreign_keys_args)
+        return this.equals((get_foreign_keys_args)that);
       return false;
     }

-    public boolean equals(get_primary_keys_args that) {
+    public boolean equals(get_foreign_keys_args that) {
       if (that == null)
         return false;

@@ -131880,7 +134115,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_primary_keys_args other) {
+    public int compareTo(get_foreign_keys_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -131914,7 +134149,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_primary_keys_args(");
+      StringBuilder sb = new StringBuilder("get_foreign_keys_args(");
       boolean first = true;

       sb.append("request:");
@@ -131952,15 +134187,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_primary_keys_argsStandardSchemeFactory implements SchemeFactory {
-      public get_primary_keys_argsStandardScheme getScheme() {
-        return new get_primary_keys_argsStandardScheme();
+    private static class get_foreign_keys_argsStandardSchemeFactory implements SchemeFactory {
+      public get_foreign_keys_argsStandardScheme getScheme() {
+        return new get_foreign_keys_argsStandardScheme();
       }
     }

-    private static class get_primary_keys_argsStandardScheme extends StandardScheme<get_primary_keys_args> {
+    private static class get_foreign_keys_argsStandardScheme extends StandardScheme<get_foreign_keys_args> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -131972,7 +134207,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_ar
           switch (schemeField.id) {
             case 1: // REQUEST
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.request = new PrimaryKeysRequest();
+                struct.request = new ForeignKeysRequest();
                 struct.request.read(iprot);
                 struct.setRequestIsSet(true);
               } else {
@@ -131988,7 +134223,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_ar
         struct.validate();
       }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
         struct.validate();
         oprot.writeStructBegin(STRUCT_DESC);
@@ -132003,16 +134238,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_a
     }

-    private static class get_primary_keys_argsTupleSchemeFactory implements SchemeFactory {
-      public get_primary_keys_argsTupleScheme getScheme() {
-        return new get_primary_keys_argsTupleScheme();
+    private static class get_foreign_keys_argsTupleSchemeFactory implements SchemeFactory {
+      public get_foreign_keys_argsTupleScheme getScheme() {
+        return new get_foreign_keys_argsTupleScheme();
       }
     }

-    private static class get_primary_keys_argsTupleScheme extends TupleScheme<get_primary_keys_args> {
+    private static class get_foreign_keys_argsTupleScheme extends TupleScheme<get_foreign_keys_args> {

       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetRequest()) {
@@ -132025,11 +134260,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_ar
       }

       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.request = new PrimaryKeysRequest();
+          struct.request = new ForeignKeysRequest();
           struct.request.read(iprot);
           struct.setRequestIsSet(true);
         }
@@ -132038,8 +134273,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_arg
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_primary_keys_result implements org.apache.thrift.TBase<get_primary_keys_result, get_primary_keys_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_primary_keys_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_primary_keys_result");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_foreign_keys_result implements org.apache.thrift.TBase<get_foreign_keys_result, get_foreign_keys_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_foreign_keys_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_foreign_keys_result");

     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
     private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
@@ -132047,11 +134282,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_arg
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_primary_keys_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_primary_keys_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_foreign_keys_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_foreign_keys_resultTupleSchemeFactory());
     }

-    private PrimaryKeysResponse success; // required
+    private ForeignKeysResponse success; // required
     private MetaException o1; // required
     private NoSuchObjectException o2; // required

@@ -132124,20 +134359,20 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrimaryKeysResponse.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ForeignKeysResponse.class)));
       tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_primary_keys_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_foreign_keys_result.class, metaDataMap);
     }

-    public get_primary_keys_result() {
+    public get_foreign_keys_result() {
     }
-    public get_primary_keys_result(
-      PrimaryKeysResponse success,
+    public get_foreign_keys_result(
+      ForeignKeysResponse success,
       MetaException o1,
       NoSuchObjectException o2)
     {
@@ -132150,9 +134385,9 @@ public get_primary_keys_result(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_primary_keys_result(get_primary_keys_result other) {
+    public get_foreign_keys_result(get_foreign_keys_result other) {
       if (other.isSetSuccess()) {
-        this.success = new PrimaryKeysResponse(other.success);
+        this.success = new ForeignKeysResponse(other.success);
       }
       if (other.isSetO1()) {
         this.o1 = new MetaException(other.o1);
@@ -132162,8 +134397,8 @@ public get_primary_keys_result(get_primary_keys_result other) {
       }
     }

-    public get_primary_keys_result deepCopy() {
-      return new get_primary_keys_result(this);
+    public get_foreign_keys_result deepCopy() {
+      return new get_foreign_keys_result(this);
     }

     @Override
@@ -132173,11 +134408,11 @@ public void clear() {
       this.o2 = null;
     }

-    public PrimaryKeysResponse getSuccess() {
+    public ForeignKeysResponse getSuccess() {
       return this.success;
     }

-    public void setSuccess(PrimaryKeysResponse success) {
+    public void setSuccess(ForeignKeysResponse success) {
       this.success = success;
     }

@@ -132248,7 +134483,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((PrimaryKeysResponse)value);
+          setSuccess((ForeignKeysResponse)value);
         }
         break;

@@ -132307,12 +134542,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_primary_keys_result)
-        return this.equals((get_primary_keys_result)that);
+      if (that instanceof get_foreign_keys_result)
+        return this.equals((get_foreign_keys_result)that);
       return false;
     }

-    public boolean equals(get_primary_keys_result that) {
+    public boolean equals(get_foreign_keys_result that) {
       if (that == null)
         return false;

@@ -132369,7 +134604,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_primary_keys_result other) {
+    public int compareTo(get_foreign_keys_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
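The hunks above and below swap the old list<string>-based get_index_names plumbing for struct-typed results: success now carries a *Response struct, MetaException rides in o1 (field 1), and NoSuchObjectException in o2 (field 2). A minimal client-side sketch of how a caller would exercise the regenerated surface — the endpoint, database, and table names are assumptions, not taken from this diff, and this is an illustration rather than a verified example:

// Sketch only: assumes a metastore Thrift endpoint on localhost:9083, the
// generated ThriftHiveMetastore.Client from this file, and placeholder
// db/table names. Error fields map to exceptions exactly as the result
// struct above declares: o1 -> MetaException, o2 -> NoSuchObjectException.
import org.apache.hadoop.hive.metastore.api.*;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class PrimaryKeysSketch {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("localhost", 9083); // assumed endpoint
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    try {
      // success (field 0) is now a PrimaryKeysResponse, not a list<string>
      PrimaryKeysResponse pks =
          client.get_primary_keys(new PrimaryKeysRequest("default", "web_sales"));
      System.out.println(pks.getPrimaryKeys());
    } catch (NoSuchObjectException e) { // arrives as result field o2
      System.err.println("no such table: " + e.getMessage());
    } catch (MetaException e) {         // arrives as result field o1
      System.err.println("metastore error: " + e.getMessage());
    } finally {
      transport.close();
    }
  }
}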
@@ -132423,7 +134658,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_primary_keys_result(");
+      StringBuilder sb = new StringBuilder("get_foreign_keys_result(");
       boolean first = true;

       sb.append("success:");
@@ -132477,15 +134712,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_primary_keys_resultStandardSchemeFactory implements SchemeFactory {
-      public get_primary_keys_resultStandardScheme getScheme() {
-        return new get_primary_keys_resultStandardScheme();
+    private static class get_foreign_keys_resultStandardSchemeFactory implements SchemeFactory {
+      public get_foreign_keys_resultStandardScheme getScheme() {
+        return new get_foreign_keys_resultStandardScheme();
       }
     }

-    private static class get_primary_keys_resultStandardScheme extends StandardScheme<get_primary_keys_result> {
+    private static class get_foreign_keys_resultStandardScheme extends StandardScheme<get_foreign_keys_result> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -132497,7 +134732,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_re
           switch (schemeField.id) {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new PrimaryKeysResponse();
+                struct.success = new ForeignKeysResponse();
                 struct.success.read(iprot);
                 struct.setSuccessIsSet(true);
               } else {
@@ -132531,7 +134766,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_re
         struct.validate();
       }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
         struct.validate();
         oprot.writeStructBegin(STRUCT_DESC);
@@ -132556,16 +134791,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_r
     }

-    private static class get_primary_keys_resultTupleSchemeFactory implements SchemeFactory {
-      public get_primary_keys_resultTupleScheme getScheme() {
-        return new get_primary_keys_resultTupleScheme();
+    private static class get_foreign_keys_resultTupleSchemeFactory implements SchemeFactory {
+      public get_foreign_keys_resultTupleScheme getScheme() {
+        return new get_foreign_keys_resultTupleScheme();
       }
     }

-    private static class get_primary_keys_resultTupleScheme extends TupleScheme<get_primary_keys_result> {
+    private static class get_foreign_keys_resultTupleScheme extends TupleScheme<get_foreign_keys_result> {

       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetSuccess()) {
@@ -132590,11 +134825,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_re
       }

       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(3);
         if (incoming.get(0)) {
-          struct.success = new PrimaryKeysResponse();
+          struct.success = new ForeignKeysResponse();
           struct.success.read(iprot);
           struct.setSuccessIsSet(true);
         }
@@ -132613,18 +134848,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_res
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_foreign_keys_args implements org.apache.thrift.TBase<get_foreign_keys_args, get_foreign_keys_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_foreign_keys_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_foreign_keys_args");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_unique_constraints_args implements org.apache.thrift.TBase<get_unique_constraints_args, get_unique_constraints_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_unique_constraints_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_unique_constraints_args");

     private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_foreign_keys_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_foreign_keys_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_unique_constraints_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_unique_constraints_argsTupleSchemeFactory());
     }

-    private ForeignKeysRequest request; // required
+    private UniqueConstraintsRequest request; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -132689,16 +134924,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ForeignKeysRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UniqueConstraintsRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_foreign_keys_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_unique_constraints_args.class, metaDataMap);
     }

-    public get_foreign_keys_args() {
+    public get_unique_constraints_args() {
     }

-    public get_foreign_keys_args(
-      ForeignKeysRequest request)
+    public get_unique_constraints_args(
+      UniqueConstraintsRequest request)
     {
       this();
       this.request = request;
@@ -132707,14 +134942,14 @@ public get_foreign_keys_args(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_foreign_keys_args(get_foreign_keys_args other) {
+    public get_unique_constraints_args(get_unique_constraints_args other) {
       if (other.isSetRequest()) {
-        this.request = new ForeignKeysRequest(other.request);
+        this.request = new UniqueConstraintsRequest(other.request);
       }
     }

-    public get_foreign_keys_args deepCopy() {
-      return new get_foreign_keys_args(this);
+    public get_unique_constraints_args deepCopy() {
+      return new get_unique_constraints_args(this);
     }

     @Override
@@ -132722,11 +134957,11 @@ public void clear() {
       this.request = null;
     }

-    public ForeignKeysRequest getRequest() {
+    public UniqueConstraintsRequest getRequest() {
       return this.request;
     }

-    public void setRequest(ForeignKeysRequest request) {
+    public void setRequest(UniqueConstraintsRequest request) {
       this.request = request;
     }

@@ -132751,7 +134986,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetRequest();
         } else {
-          setRequest((ForeignKeysRequest)value);
+          setRequest((UniqueConstraintsRequest)value);
         }
         break;

@@ -132784,12 +135019,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_foreign_keys_args)
-        return this.equals((get_foreign_keys_args)that);
+      if (that instanceof get_unique_constraints_args)
+        return this.equals((get_unique_constraints_args)that);
       return false;
     }

-    public boolean equals(get_foreign_keys_args that) {
+    public boolean equals(get_unique_constraints_args that) {
       if (that == null)
         return false;

@@ -132818,7 +135053,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_foreign_keys_args other) {
+    public int compareTo(get_unique_constraints_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -132852,7 +135087,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_foreign_keys_args(");
+      StringBuilder sb = new StringBuilder("get_unique_constraints_args(");
       boolean first = true;

       sb.append("request:");
@@ -132890,15 +135125,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_foreign_keys_argsStandardSchemeFactory implements SchemeFactory {
-      public get_foreign_keys_argsStandardScheme getScheme() {
-        return new get_foreign_keys_argsStandardScheme();
+    private static class get_unique_constraints_argsStandardSchemeFactory implements SchemeFactory {
+      public get_unique_constraints_argsStandardScheme getScheme() {
+        return new get_unique_constraints_argsStandardScheme();
       }
     }

-    private static class get_foreign_keys_argsStandardScheme extends StandardScheme<get_foreign_keys_args> {
+    private static class get_unique_constraints_argsStandardScheme extends StandardScheme<get_unique_constraints_args> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -132910,7 +135145,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_ar
           switch (schemeField.id) {
             case 1: // REQUEST
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.request = new ForeignKeysRequest();
+                struct.request = new UniqueConstraintsRequest();
                 struct.request.read(iprot);
                 struct.setRequestIsSet(true);
               } else {
@@ -132926,7 +135161,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_ar
         struct.validate();
       }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
         struct.validate();
         oprot.writeStructBegin(STRUCT_DESC);
@@ -132941,16 +135176,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_a
     }

-    private static class get_foreign_keys_argsTupleSchemeFactory implements SchemeFactory {
-      public get_foreign_keys_argsTupleScheme getScheme() {
-        return new get_foreign_keys_argsTupleScheme();
+    private static class get_unique_constraints_argsTupleSchemeFactory implements SchemeFactory {
+      public get_unique_constraints_argsTupleScheme getScheme() {
+        return new get_unique_constraints_argsTupleScheme();
       }
     }

-    private static class get_foreign_keys_argsTupleScheme extends TupleScheme<get_foreign_keys_args> {
+    private static class get_unique_constraints_argsTupleScheme extends TupleScheme<get_unique_constraints_args> {

       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetRequest()) {
@@ -132963,11 +135198,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_ar
       }

       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.request = new ForeignKeysRequest();
+          struct.request = new UniqueConstraintsRequest();
           struct.request.read(iprot);
           struct.setRequestIsSet(true);
         }
@@ -132976,8 +135211,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_arg
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_foreign_keys_result implements org.apache.thrift.TBase<get_foreign_keys_result, get_foreign_keys_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_foreign_keys_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_foreign_keys_result");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_unique_constraints_result implements org.apache.thrift.TBase<get_unique_constraints_result, get_unique_constraints_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_unique_constraints_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_unique_constraints_result");

     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
     private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
@@ -132985,11 +135220,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_arg
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_foreign_keys_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_foreign_keys_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_unique_constraints_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_unique_constraints_resultTupleSchemeFactory());
     }

-    private ForeignKeysResponse success; // required
+    private UniqueConstraintsResponse success; // required
     private MetaException o1; // required
     private NoSuchObjectException o2; // required

@@ -133062,20 +135297,20 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ForeignKeysResponse.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UniqueConstraintsResponse.class)));
       tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_foreign_keys_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_unique_constraints_result.class, metaDataMap);
     }

-    public get_foreign_keys_result() {
+    public get_unique_constraints_result() {
     }

-    public get_foreign_keys_result(
-      ForeignKeysResponse success,
+    public get_unique_constraints_result(
+      UniqueConstraintsResponse success,
       MetaException o1,
       NoSuchObjectException o2)
     {
@@ -133088,9 +135323,9 @@ public get_foreign_keys_result(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_foreign_keys_result(get_foreign_keys_result other) {
+    public get_unique_constraints_result(get_unique_constraints_result other) {
       if (other.isSetSuccess()) {
-        this.success = new ForeignKeysResponse(other.success);
+        this.success = new UniqueConstraintsResponse(other.success);
       }
       if (other.isSetO1()) {
         this.o1 = new MetaException(other.o1);
@@ -133100,8 +135335,8 @@ public get_foreign_keys_result(get_foreign_keys_result other) {
       }
     }

-    public get_foreign_keys_result deepCopy() {
-      return new get_foreign_keys_result(this);
+    public get_unique_constraints_result deepCopy() {
+      return new get_unique_constraints_result(this);
     }

     @Override
@@ -133111,11 +135346,11 @@ public void clear() {
       this.o2 = null;
     }

-    public ForeignKeysResponse getSuccess() {
+    public UniqueConstraintsResponse getSuccess() {
       return this.success;
     }

-    public void setSuccess(ForeignKeysResponse success) {
+    public void setSuccess(UniqueConstraintsResponse success) {
       this.success = success;
     }

@@ -133186,7 +135421,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((ForeignKeysResponse)value);
+          setSuccess((UniqueConstraintsResponse)value);
         }
         break;

@@ -133245,12 +135480,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_foreign_keys_result)
-        return this.equals((get_foreign_keys_result)that);
+      if (that instanceof get_unique_constraints_result)
+        return this.equals((get_unique_constraints_result)that);
       return false;
     }

-    public boolean equals(get_foreign_keys_result that) {
+    public boolean equals(get_unique_constraints_result that) {
       if (that == null)
         return false;

@@ -133307,7 +135542,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_foreign_keys_result other) {
+    public int compareTo(get_unique_constraints_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -133361,7 +135596,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_foreign_keys_result(");
+      StringBuilder sb = new StringBuilder("get_unique_constraints_result(");
       boolean first = true;

       sb.append("success:");
@@ -133415,15 +135650,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_foreign_keys_resultStandardSchemeFactory implements SchemeFactory {
-      public get_foreign_keys_resultStandardScheme getScheme() {
-        return new get_foreign_keys_resultStandardScheme();
+    private static class get_unique_constraints_resultStandardSchemeFactory implements SchemeFactory {
+      public get_unique_constraints_resultStandardScheme getScheme() {
+        return new get_unique_constraints_resultStandardScheme();
       }
     }

-    private static class get_foreign_keys_resultStandardScheme extends StandardScheme<get_foreign_keys_result> {
+    private static class get_unique_constraints_resultStandardScheme extends StandardScheme<get_unique_constraints_result> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -133435,7 +135670,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_re
           switch (schemeField.id) {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new ForeignKeysResponse();
+                struct.success = new UniqueConstraintsResponse();
                 struct.success.read(iprot);
                 struct.setSuccessIsSet(true);
               } else {
@@ -133469,7 +135704,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_re
         struct.validate();
       }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
         struct.validate();
         oprot.writeStructBegin(STRUCT_DESC);
@@ -133494,16 +135729,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_r
     }

-    private static class get_foreign_keys_resultTupleSchemeFactory implements SchemeFactory {
-      public get_foreign_keys_resultTupleScheme getScheme() {
-        return new get_foreign_keys_resultTupleScheme();
+    private static class get_unique_constraints_resultTupleSchemeFactory implements SchemeFactory {
+      public get_unique_constraints_resultTupleScheme getScheme() {
+        return new get_unique_constraints_resultTupleScheme();
      }
     }

-    private static class get_foreign_keys_resultTupleScheme extends TupleScheme<get_foreign_keys_result> {
+    private static class get_unique_constraints_resultTupleScheme extends TupleScheme<get_unique_constraints_result> {

       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetSuccess()) {
@@ -133528,11 +135763,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_re
       }

       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(3);
         if (incoming.get(0)) {
-          struct.success = new ForeignKeysResponse();
+          struct.success = new UniqueConstraintsResponse();
           struct.success.read(iprot);
           struct.setSuccessIsSet(true);
         }
@@ -133551,18 +135786,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_res
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_unique_constraints_args implements org.apache.thrift.TBase<get_unique_constraints_args, get_unique_constraints_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_unique_constraints_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_unique_constraints_args");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_not_null_constraints_args implements org.apache.thrift.TBase<get_not_null_constraints_args, get_not_null_constraints_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_not_null_constraints_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_not_null_constraints_args");

     private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_unique_constraints_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_unique_constraints_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_not_null_constraints_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_not_null_constraints_argsTupleSchemeFactory());
     }

-    private UniqueConstraintsRequest request; // required
+    private NotNullConstraintsRequest request; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -133627,16 +135862,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UniqueConstraintsRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotNullConstraintsRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_unique_constraints_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_not_null_constraints_args.class, metaDataMap);
     }

-    public get_unique_constraints_args() {
+    public get_not_null_constraints_args() {
     }

-    public get_unique_constraints_args(
-      UniqueConstraintsRequest request)
+    public get_not_null_constraints_args(
+      NotNullConstraintsRequest request)
     {
       this();
       this.request = request;
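The TupleScheme pairs throughout this stretch encode field presence in a leading BitSet: the old result wrote two flag bits (success, o2) where the regenerated one writes three (success, o1, o2), which is why writeBitSet/readBitSet moved from 2 to 3. A toy, self-contained illustration of that header convention — not Hive code:

import java.util.BitSet;

// Toy illustration (not generated code) of the TupleScheme header used above:
// a BitSet of "isSet" flags goes first, then only the present fields follow.
class TupleHeaderDemo {
  static BitSet header(boolean hasSuccess, boolean hasO1, boolean hasO2) {
    BitSet optionals = new BitSet();
    if (hasSuccess) optionals.set(0); // mirrors struct.isSetSuccess()
    if (hasO1)      optionals.set(1); // mirrors struct.isSetO1()
    if (hasO2)      optionals.set(2); // mirrors struct.isSetO2()
    return optionals;                 // writer: writeBitSet(optionals, 3)
  }                                   // reader: readBitSet(3), then get(0..2)

  public static void main(String[] args) {
    System.out.println(header(true, false, true)); // prints: {0, 2}
  }
}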
@@ -133645,14 +135880,14 @@ public get_unique_constraints_args(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_unique_constraints_args(get_unique_constraints_args other) {
+    public get_not_null_constraints_args(get_not_null_constraints_args other) {
       if (other.isSetRequest()) {
-        this.request = new UniqueConstraintsRequest(other.request);
+        this.request = new NotNullConstraintsRequest(other.request);
       }
     }

-    public get_unique_constraints_args deepCopy() {
-      return new get_unique_constraints_args(this);
+    public get_not_null_constraints_args deepCopy() {
+      return new get_not_null_constraints_args(this);
     }

     @Override
@@ -133660,11 +135895,11 @@ public void clear() {
       this.request = null;
     }

-    public UniqueConstraintsRequest getRequest() {
+    public NotNullConstraintsRequest getRequest() {
       return this.request;
     }

-    public void setRequest(UniqueConstraintsRequest request) {
+    public void setRequest(NotNullConstraintsRequest request) {
       this.request = request;
     }

@@ -133689,7 +135924,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetRequest();
         } else {
-          setRequest((UniqueConstraintsRequest)value);
+          setRequest((NotNullConstraintsRequest)value);
         }
         break;

@@ -133722,12 +135957,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_unique_constraints_args)
-        return this.equals((get_unique_constraints_args)that);
+      if (that instanceof get_not_null_constraints_args)
+        return this.equals((get_not_null_constraints_args)that);
       return false;
     }

-    public boolean equals(get_unique_constraints_args that) {
+    public boolean equals(get_not_null_constraints_args that) {
       if (that == null)
         return false;

@@ -133756,7 +135991,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_unique_constraints_args other) {
+    public int compareTo(get_not_null_constraints_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -133790,7 +136025,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_unique_constraints_args(");
+      StringBuilder sb = new StringBuilder("get_not_null_constraints_args(");
       boolean first = true;

       sb.append("request:");
@@ -133828,15 +136063,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_unique_constraints_argsStandardSchemeFactory implements SchemeFactory {
-      public get_unique_constraints_argsStandardScheme getScheme() {
-        return new get_unique_constraints_argsStandardScheme();
+    private static class get_not_null_constraints_argsStandardSchemeFactory implements SchemeFactory {
+      public get_not_null_constraints_argsStandardScheme getScheme() {
+        return new get_not_null_constraints_argsStandardScheme();
       }
     }

-    private static class get_unique_constraints_argsStandardScheme extends StandardScheme<get_unique_constraints_args> {
+    private static class get_not_null_constraints_argsStandardScheme extends StandardScheme<get_not_null_constraints_args> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constraints_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -133848,7 +136083,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constrai
           switch (schemeField.id) {
             case 1: // REQUEST
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.request = new UniqueConstraintsRequest();
+                struct.request = new NotNullConstraintsRequest();
                 struct.request.read(iprot);
                 struct.setRequestIsSet(true);
               } else {
@@ -133864,7 +136099,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constrai
         struct.validate();
       }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_not_null_constraints_args struct) throws org.apache.thrift.TException {
         struct.validate();
         oprot.writeStructBegin(STRUCT_DESC);
@@ -133879,16 +136114,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_unique_constra
     }

-    private static class get_unique_constraints_argsTupleSchemeFactory implements SchemeFactory {
-      public get_unique_constraints_argsTupleScheme getScheme() {
-        return new get_unique_constraints_argsTupleScheme();
+    private static class get_not_null_constraints_argsTupleSchemeFactory implements SchemeFactory {
+      public get_not_null_constraints_argsTupleScheme getScheme() {
+        return new get_not_null_constraints_argsTupleScheme();
       }
     }

-    private static class get_unique_constraints_argsTupleScheme extends TupleScheme<get_unique_constraints_args> {
+    private static class get_not_null_constraints_argsTupleScheme extends TupleScheme<get_not_null_constraints_args> {

       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetRequest()) {
@@ -133901,11 +136136,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_unique_constrai
       }

       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.request = new UniqueConstraintsRequest();
+          struct.request = new NotNullConstraintsRequest();
           struct.request.read(iprot);
           struct.setRequestIsSet(true);
         }
@@ -133914,8 +136149,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constrain
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_unique_constraints_result implements org.apache.thrift.TBase<get_unique_constraints_result, get_unique_constraints_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_unique_constraints_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_unique_constraints_result");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_not_null_constraints_result implements org.apache.thrift.TBase<get_not_null_constraints_result, get_not_null_constraints_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_not_null_constraints_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_not_null_constraints_result");

     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
     private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
@@ -133923,11 +136158,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constrain
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_unique_constraints_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_unique_constraints_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_not_null_constraints_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_not_null_constraints_resultTupleSchemeFactory());
     }

-    private UniqueConstraintsResponse success; // required
+    private NotNullConstraintsResponse success; // required
     private MetaException o1; // required
     private NoSuchObjectException o2; // required

@@ -134000,20 +136235,20 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UniqueConstraintsResponse.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotNullConstraintsResponse.class)));
       tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_unique_constraints_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_not_null_constraints_result.class, metaDataMap);
     }

-    public get_unique_constraints_result() {
+    public get_not_null_constraints_result() {
     }

-    public get_unique_constraints_result(
-      UniqueConstraintsResponse success,
+    public get_not_null_constraints_result(
+      NotNullConstraintsResponse success,
       MetaException o1,
       NoSuchObjectException o2)
     {
@@ -134026,9 +136261,9 @@ public get_unique_constraints_result(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_unique_constraints_result(get_unique_constraints_result other) {
+    public get_not_null_constraints_result(get_not_null_constraints_result other) {
       if (other.isSetSuccess()) {
-        this.success = new UniqueConstraintsResponse(other.success);
+        this.success = new NotNullConstraintsResponse(other.success);
       }
       if (other.isSetO1()) {
         this.o1 = new MetaException(other.o1);
@@ -134038,8 +136273,8 @@ public get_unique_constraints_result(get_unique_constraints_result other) {
       }
     }

-    public get_unique_constraints_result deepCopy() {
-      return new get_unique_constraints_result(this);
+    public get_not_null_constraints_result deepCopy() {
+      return new get_not_null_constraints_result(this);
     }

     @Override
@@ -134049,11 +136284,11 @@ public void clear() {
       this.o2 = null;
     }

-    public UniqueConstraintsResponse getSuccess() {
+    public NotNullConstraintsResponse getSuccess() {
       return this.success;
     }

-    public void setSuccess(UniqueConstraintsResponse success) {
+    public void setSuccess(NotNullConstraintsResponse success) {
       this.success = success;
     }

@@ -134124,7 +136359,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((UniqueConstraintsResponse)value);
+          setSuccess((NotNullConstraintsResponse)value);
         }
         break;

@@ -134183,12 +136418,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_unique_constraints_result)
-        return this.equals((get_unique_constraints_result)that);
+      if (that instanceof get_not_null_constraints_result)
+        return this.equals((get_not_null_constraints_result)that);
       return false;
     }

-    public boolean equals(get_unique_constraints_result that) {
+    public boolean equals(get_not_null_constraints_result that) {
       if (that == null)
         return false;

@@ -134245,7 +136480,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_unique_constraints_result other) {
+    public int compareTo(get_not_null_constraints_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -134299,7 +136534,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_unique_constraints_result(");
+      StringBuilder sb = new StringBuilder("get_not_null_constraints_result(");
       boolean first = true;

       sb.append("success:");
@@ -134353,15 +136588,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_unique_constraints_resultStandardSchemeFactory implements SchemeFactory {
-      public get_unique_constraints_resultStandardScheme getScheme() {
-        return new get_unique_constraints_resultStandardScheme();
+    private static class get_not_null_constraints_resultStandardSchemeFactory implements SchemeFactory {
+      public get_not_null_constraints_resultStandardScheme getScheme() {
+        return new get_not_null_constraints_resultStandardScheme();
       }
     }

-    private static class get_unique_constraints_resultStandardScheme extends StandardScheme<get_unique_constraints_result> {
+    private static class get_not_null_constraints_resultStandardScheme extends StandardScheme<get_not_null_constraints_result> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constraints_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -134373,7 +136608,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constrai
           switch (schemeField.id) {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new UniqueConstraintsResponse();
+                struct.success = new NotNullConstraintsResponse();
                 struct.success.read(iprot);
                 struct.setSuccessIsSet(true);
               } else {
@@ -134407,7 +136642,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_unique_constrai
         struct.validate();
       }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_not_null_constraints_result struct) throws org.apache.thrift.TException {
         struct.validate();
         oprot.writeStructBegin(STRUCT_DESC);
@@ -134432,16 +136667,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_unique_constra
     }

-    private static class get_unique_constraints_resultTupleSchemeFactory implements SchemeFactory {
-      public get_unique_constraints_resultTupleScheme getScheme() {
-        return new get_unique_constraints_resultTupleScheme();
+    private static class get_not_null_constraints_resultTupleSchemeFactory implements SchemeFactory {
+      public get_not_null_constraints_resultTupleScheme getScheme() {
+        return new get_not_null_constraints_resultTupleScheme();
       }
     }

-    private static class get_unique_constraints_resultTupleScheme extends TupleScheme<get_unique_constraints_result> {
+    private static class get_not_null_constraints_resultTupleScheme extends TupleScheme<get_not_null_constraints_result> {

       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetSuccess()) {
@@ -134466,11 +136701,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_unique_constrai
       }

       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constraints_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(3);
         if (incoming.get(0)) {
-          struct.success = new UniqueConstraintsResponse();
+          struct.success = new NotNullConstraintsResponse();
           struct.success.read(iprot);
           struct.setSuccessIsSet(true);
         }
@@ -134489,18 +136724,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_unique_constrain
   }

-  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_not_null_constraints_args implements org.apache.thrift.TBase<get_not_null_constraints_args, get_not_null_constraints_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_not_null_constraints_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_not_null_constraints_args");
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_default_constraints_args implements org.apache.thrift.TBase<get_default_constraints_args, get_default_constraints_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_default_constraints_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_default_constraints_args");

     private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_not_null_constraints_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_not_null_constraints_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_default_constraints_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_default_constraints_argsTupleSchemeFactory());
     }

-    private NotNullConstraintsRequest request; // required
+    private DefaultConstraintsRequest request; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
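Every method in this stretch follows the same generated envelope: a <method>_args struct wrapping a single request field, and a <method>_result struct that multiplexes the success value (field 0) with the declared exceptions (o1, o2). A schematic, hypothetical reduction of the unwrap step a generated recv_* helper performs — simplified, not the actual generated code:

// Hypothetical reduction of Thrift's <method>_result handling (illustration only).
class ResultEnvelopeDemo {
  static class Result<T> {
    T success;        // field id 0: the method's return value
    Exception o1;     // field id 1: e.g. MetaException
    Exception o2;     // field id 2: e.g. NoSuchObjectException

    T unwrap() throws Exception {
      if (success != null) return success;  // normal completion
      if (o1 != null) throw o1;             // declared exception #1
      if (o2 != null) throw o2;             // declared exception #2
      throw new IllegalStateException("RPC failed: unknown result");
    }
  }

  public static void main(String[] args) throws Exception {
    Result<String> r = new Result<>();
    r.success = "ok";
    System.out.println(r.unwrap()); // prints: ok
  }
}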
@@ -134565,16 +136800,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotNullConstraintsRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DefaultConstraintsRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_not_null_constraints_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_default_constraints_args.class, metaDataMap);
     }

-    public get_not_null_constraints_args() {
+    public get_default_constraints_args() {
     }

-    public get_not_null_constraints_args(
-      NotNullConstraintsRequest request)
+    public get_default_constraints_args(
+      DefaultConstraintsRequest request)
     {
       this();
       this.request = request;
@@ -134583,14 +136818,14 @@ public get_not_null_constraints_args(
     /**
      * Performs a deep copy on <i>other</i>.
      */
-    public get_not_null_constraints_args(get_not_null_constraints_args other) {
+    public get_default_constraints_args(get_default_constraints_args other) {
       if (other.isSetRequest()) {
-        this.request = new NotNullConstraintsRequest(other.request);
+        this.request = new DefaultConstraintsRequest(other.request);
       }
     }

-    public get_not_null_constraints_args deepCopy() {
-      return new get_not_null_constraints_args(this);
+    public get_default_constraints_args deepCopy() {
+      return new get_default_constraints_args(this);
     }

     @Override
@@ -134598,11 +136833,11 @@ public void clear() {
       this.request = null;
     }

-    public NotNullConstraintsRequest getRequest() {
+    public DefaultConstraintsRequest getRequest() {
       return this.request;
     }

-    public void setRequest(NotNullConstraintsRequest request) {
+    public void setRequest(DefaultConstraintsRequest request) {
       this.request = request;
     }

@@ -134627,7 +136862,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetRequest();
         } else {
-          setRequest((NotNullConstraintsRequest)value);
+          setRequest((DefaultConstraintsRequest)value);
         }
         break;

@@ -134660,12 +136895,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_not_null_constraints_args)
-        return this.equals((get_not_null_constraints_args)that);
+      if (that instanceof get_default_constraints_args)
+        return this.equals((get_default_constraints_args)that);
       return false;
     }

-    public boolean equals(get_not_null_constraints_args that) {
+    public boolean equals(get_default_constraints_args that) {
       if (that == null)
         return false;

@@ -134694,7 +136929,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_not_null_constraints_args other) {
+    public int compareTo(get_default_constraints_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -134728,7 +136963,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_not_null_constraints_args("); + StringBuilder sb = new StringBuilder("get_default_constraints_args("); boolean first = true; sb.append("request:"); @@ -134766,15 +137001,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_not_null_constraints_argsStandardSchemeFactory implements SchemeFactory { - public get_not_null_constraints_argsStandardScheme getScheme() { - return new get_not_null_constraints_argsStandardScheme(); + private static class get_default_constraints_argsStandardSchemeFactory implements SchemeFactory { + public get_default_constraints_argsStandardScheme getScheme() { + return new get_default_constraints_argsStandardScheme(); } } - private static class get_not_null_constraints_argsStandardScheme extends StandardScheme { + private static class get_default_constraints_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constraints_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_default_constraints_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -134786,7 +137021,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constr switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new NotNullConstraintsRequest(); + struct.request = new DefaultConstraintsRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -134802,7 +137037,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constr struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_not_null_constraints_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_default_constraints_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -134817,16 +137052,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_not_null_const } - private static class get_not_null_constraints_argsTupleSchemeFactory implements SchemeFactory { - public get_not_null_constraints_argsTupleScheme getScheme() { - return new get_not_null_constraints_argsTupleScheme(); + private static class get_default_constraints_argsTupleSchemeFactory implements SchemeFactory { + public get_default_constraints_argsTupleScheme getScheme() { + return new get_default_constraints_argsTupleScheme(); } } - private static class get_not_null_constraints_argsTupleScheme extends TupleScheme { + private static class get_default_constraints_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_default_constraints_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -134839,11 +137074,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_not_null_constr } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_args struct) throws 
org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_default_constraints_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new NotNullConstraintsRequest(); + struct.request = new DefaultConstraintsRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -134852,8 +137087,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_not_null_constra } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_not_null_constraints_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_not_null_constraints_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_default_constraints_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_default_constraints_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -134861,11 +137096,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_not_null_constra private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_not_null_constraints_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_not_null_constraints_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_default_constraints_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_default_constraints_resultTupleSchemeFactory()); } - private NotNullConstraintsResponse success; // required + private DefaultConstraintsResponse success; // required private MetaException o1; // required private NoSuchObjectException o2; // required @@ -134938,20 +137173,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotNullConstraintsResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DefaultConstraintsResponse.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = 
Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_not_null_constraints_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_default_constraints_result.class, metaDataMap); } - public get_not_null_constraints_result() { + public get_default_constraints_result() { } - public get_not_null_constraints_result( - NotNullConstraintsResponse success, + public get_default_constraints_result( + DefaultConstraintsResponse success, MetaException o1, NoSuchObjectException o2) { @@ -134964,9 +137199,9 @@ public get_not_null_constraints_result( /** * Performs a deep copy on other. */ - public get_not_null_constraints_result(get_not_null_constraints_result other) { + public get_default_constraints_result(get_default_constraints_result other) { if (other.isSetSuccess()) { - this.success = new NotNullConstraintsResponse(other.success); + this.success = new DefaultConstraintsResponse(other.success); } if (other.isSetO1()) { this.o1 = new MetaException(other.o1); @@ -134976,8 +137211,8 @@ public get_not_null_constraints_result(get_not_null_constraints_result other) { } } - public get_not_null_constraints_result deepCopy() { - return new get_not_null_constraints_result(this); + public get_default_constraints_result deepCopy() { + return new get_default_constraints_result(this); } @Override @@ -134987,11 +137222,11 @@ public void clear() { this.o2 = null; } - public NotNullConstraintsResponse getSuccess() { + public DefaultConstraintsResponse getSuccess() { return this.success; } - public void setSuccess(NotNullConstraintsResponse success) { + public void setSuccess(DefaultConstraintsResponse success) { this.success = success; } @@ -135062,7 +137297,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((NotNullConstraintsResponse)value); + setSuccess((DefaultConstraintsResponse)value); } break; @@ -135121,12 +137356,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_not_null_constraints_result) - return this.equals((get_not_null_constraints_result)that); + if (that instanceof get_default_constraints_result) + return this.equals((get_default_constraints_result)that); return false; } - public boolean equals(get_not_null_constraints_result that) { + public boolean equals(get_default_constraints_result that) { if (that == null) return false; @@ -135183,7 +137418,7 @@ public int hashCode() { } @Override - public int compareTo(get_not_null_constraints_result other) { + public int compareTo(get_default_constraints_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -135237,7 +137472,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_not_null_constraints_result("); + StringBuilder sb = new StringBuilder("get_default_constraints_result("); boolean first = true; sb.append("success:"); @@ -135291,15 +137526,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_not_null_constraints_resultStandardSchemeFactory implements SchemeFactory { - public get_not_null_constraints_resultStandardScheme getScheme() { - return new get_not_null_constraints_resultStandardScheme(); + private static class get_default_constraints_resultStandardSchemeFactory implements SchemeFactory { + public get_default_constraints_resultStandardScheme getScheme() { + return new get_default_constraints_resultStandardScheme(); } } - private static class get_not_null_constraints_resultStandardScheme extends StandardScheme { + private static class get_default_constraints_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constraints_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_default_constraints_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -135311,7 +137546,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constr switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new NotNullConstraintsResponse(); + struct.success = new DefaultConstraintsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -135345,7 +137580,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_not_null_constr struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_not_null_constraints_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_default_constraints_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -135370,16 +137605,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_not_null_const } - private static class get_not_null_constraints_resultTupleSchemeFactory implements SchemeFactory { - public get_not_null_constraints_resultTupleScheme getScheme() { - return new get_not_null_constraints_resultTupleScheme(); + private static class get_default_constraints_resultTupleSchemeFactory implements SchemeFactory { + public get_default_constraints_resultTupleScheme getScheme() { + return new get_default_constraints_resultTupleScheme(); } } - private static class get_not_null_constraints_resultTupleScheme extends TupleScheme { + private static class get_default_constraints_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_not_null_constraints_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_default_constraints_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -135404,11 +137639,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_not_null_constr } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, 
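The StandardScheme read method above is the classic thrift field loop: read a (type, id) header, dispatch known ids, and skip anything unrecognized, which is what lets old readers tolerate fields added by newer writers. A plain-JDK sketch of the same loop over a toy tag-value stream (the TYPE_* constants and skip() are stand-ins, not libthrift API):

import java.io.*;

public class FieldLoopSketch {
  static final byte TYPE_STOP = 0, TYPE_STRING = 11;

  static void read(DataInputStream in) throws IOException {
    while (true) {
      byte type = in.readByte();
      if (type == TYPE_STOP) break;          // end-of-struct marker
      short id = in.readShort();
      if (id == 1 && type == TYPE_STRING) {  // case 1: a known field
        System.out.println("request = " + in.readUTF());
      } else {
        skip(in, type);                      // analogue of TProtocolUtil.skip
      }
    }
  }

  static void skip(DataInputStream in, byte type) throws IOException {
    if (type == TYPE_STRING) in.readUTF();   // consume and discard unknown field
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeByte(TYPE_STRING); out.writeShort(7); out.writeUTF("ignored");
    out.writeByte(TYPE_STRING); out.writeShort(1); out.writeUTF("kept");
    out.writeByte(TYPE_STOP);
    read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
  }
}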
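get_default_constraints_result pairs one success field (id 0) with the declared exceptions o1/o2 (MetaException, NoSuchObjectException); at most one of them is populated on the wire, and the client stub re-throws whichever exception came back. A generic sketch of that unwrap step, with stand-in exceptions in place of the metastore types:

public class ResultUnwrapSketch {
  // Mirrors the generated *_result structs: either the success value or one
  // declared exception is set, and unwrap() turns the exception back into a throw.
  static final class CallResult<T> {
    T success;                 // field id 0 in the generated code
    Exception o1;              // stand-in for MetaException
    Exception o2;              // stand-in for NoSuchObjectException

    T unwrap() throws Exception {
      if (o1 != null) throw o1;
      if (o2 != null) throw o2;
      return success;
    }
  }

  public static void main(String[] args) throws Exception {
    CallResult<String> ok = new CallResult<>();
    ok.success = "constraints";
    System.out.println(ok.unwrap());

    CallResult<String> failed = new CallResult<>();
    failed.o2 = new IllegalStateException("NoSuchObjectException stand-in");
    try { failed.unwrap(); } catch (Exception e) { System.out.println(e.getMessage()); }
  }
}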
get_not_null_constraints_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_default_constraints_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new NotNullConstraintsResponse(); + struct.success = new DefaultConstraintsResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -151147,13 +153382,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); - struct.success = new ArrayList(_list1360.size); - String _elem1361; - for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) + org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); + struct.success = new ArrayList(_list1384.size); + String _elem1385; + for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) { - _elem1361 = iprot.readString(); - struct.success.add(_elem1361); + _elem1385 = iprot.readString(); + struct.success.add(_elem1385); } iprot.readListEnd(); } @@ -151188,9 +153423,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1363 : struct.success) + for (String _iter1387 : struct.success) { - oprot.writeString(_iter1363); + oprot.writeString(_iter1387); } oprot.writeListEnd(); } @@ -151229,9 +153464,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1364 : struct.success) + for (String _iter1388 : struct.success) { - oprot.writeString(_iter1364); + oprot.writeString(_iter1388); } } } @@ -151246,13 +153481,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1365.size); - String _elem1366; - for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) + org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1389.size); + String _elem1390; + for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) { - _elem1366 = iprot.readString(); - struct.success.add(_elem1366); + _elem1390 = iprot.readString(); + struct.success.add(_elem1390); } } struct.setSuccessIsSet(true); @@ -155307,13 +157542,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); - struct.success = new ArrayList(_list1368.size); - String _elem1369; - for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) + org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); + struct.success = new ArrayList(_list1392.size); + String _elem1393; + for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) { - _elem1369 = iprot.readString(); - 
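From here on the hunks are pure renumbering: the Thrift compiler draws the _list/_elem/_i/_iter suffixes from a single counter per generated file, so inserting the new default-constraint methods shifts every later temporary by a constant (+24 in ThriftHiveMetastore.java here, +16 in the WM*.java files below) without changing behavior. A hypothetical mini-version of such an allocator, for illustration only:

import java.util.concurrent.atomic.AtomicInteger;

public class TempNameSketch {
  // One monotonically increasing counter shared by every temporary in a
  // generated file; new code emitted earlier in the file renumbers the rest.
  static final AtomicInteger COUNTER = new AtomicInteger(1360);

  static String tmp(String prefix) { return prefix + COUNTER.getAndIncrement(); }

  public static void main(String[] args) {
    System.out.println(tmp("_list")); // _list1360
    System.out.println(tmp("_elem")); // _elem1361
    System.out.println(tmp("_i"));    // _i1362
  }
}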
struct.success.add(_elem1369); + _elem1393 = iprot.readString(); + struct.success.add(_elem1393); } iprot.readListEnd(); } @@ -155348,9 +157583,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1371 : struct.success) + for (String _iter1395 : struct.success) { - oprot.writeString(_iter1371); + oprot.writeString(_iter1395); } oprot.writeListEnd(); } @@ -155389,9 +157624,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1372 : struct.success) + for (String _iter1396 : struct.success) { - oprot.writeString(_iter1372); + oprot.writeString(_iter1396); } } } @@ -155406,13 +157641,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1373.size); - String _elem1374; - for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) + org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1397.size); + String _elem1398; + for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) { - _elem1374 = iprot.readString(); - struct.success.add(_elem1374); + _elem1398 = iprot.readString(); + struct.success.add(_elem1398); } } struct.setSuccessIsSet(true); @@ -158703,14 +160938,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1376 = iprot.readListBegin(); - struct.success = new ArrayList(_list1376.size); - Role _elem1377; - for (int _i1378 = 0; _i1378 < _list1376.size; ++_i1378) + org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); + struct.success = new ArrayList(_list1400.size); + Role _elem1401; + for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) { - _elem1377 = new Role(); - _elem1377.read(iprot); - struct.success.add(_elem1377); + _elem1401 = new Role(); + _elem1401.read(iprot); + struct.success.add(_elem1401); } iprot.readListEnd(); } @@ -158745,9 +160980,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1379 : struct.success) + for (Role _iter1403 : struct.success) { - _iter1379.write(oprot); + _iter1403.write(oprot); } oprot.writeListEnd(); } @@ -158786,9 +161021,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1380 : struct.success) + for (Role _iter1404 : struct.success) { - _iter1380.write(oprot); + _iter1404.write(oprot); } } } @@ -158803,14 +161038,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1381 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1381.size); - Role _elem1382; - for (int _i1383 = 0; _i1383 < _list1381.size; ++_i1383) + org.apache.thrift.protocol.TList _list1405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1405.size); + Role _elem1406; + for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) { - _elem1382 = new Role(); - _elem1382.read(iprot); - struct.success.add(_elem1382); + _elem1406 = new Role(); + _elem1406.read(iprot); + struct.success.add(_elem1406); } } struct.setSuccessIsSet(true); @@ -161815,13 +164050,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1384 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1384.size); - String _elem1385; - for (int _i1386 = 0; _i1386 < _list1384.size; ++_i1386) + org.apache.thrift.protocol.TList _list1408 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1408.size); + String _elem1409; + for (int _i1410 = 0; _i1410 < _list1408.size; ++_i1410) { - _elem1385 = iprot.readString(); - struct.group_names.add(_elem1385); + _elem1409 = iprot.readString(); + struct.group_names.add(_elem1409); } iprot.readListEnd(); } @@ -161857,9 +164092,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1387 : struct.group_names) + for (String _iter1411 : struct.group_names) { - oprot.writeString(_iter1387); + oprot.writeString(_iter1411); } oprot.writeListEnd(); } @@ -161902,9 +164137,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1388 : struct.group_names) + for (String _iter1412 : struct.group_names) { - oprot.writeString(_iter1388); + oprot.writeString(_iter1412); } } } @@ -161925,13 +164160,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1389.size); - String _elem1390; - for (int _i1391 = 0; _i1391 < _list1389.size; ++_i1391) + org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1413.size); + String _elem1414; + for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) { - _elem1390 = iprot.readString(); - struct.group_names.add(_elem1390); + _elem1414 = iprot.readString(); + struct.group_names.add(_elem1414); } } struct.setGroup_namesIsSet(true); @@ -163389,14 +165624,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1392 = iprot.readListBegin(); - struct.success = new ArrayList(_list1392.size); - HiveObjectPrivilege _elem1393; - for (int _i1394 = 0; _i1394 < _list1392.size; ++_i1394) 
+ org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); + struct.success = new ArrayList(_list1416.size); + HiveObjectPrivilege _elem1417; + for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) { - _elem1393 = new HiveObjectPrivilege(); - _elem1393.read(iprot); - struct.success.add(_elem1393); + _elem1417 = new HiveObjectPrivilege(); + _elem1417.read(iprot); + struct.success.add(_elem1417); } iprot.readListEnd(); } @@ -163431,9 +165666,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1395 : struct.success) + for (HiveObjectPrivilege _iter1419 : struct.success) { - _iter1395.write(oprot); + _iter1419.write(oprot); } oprot.writeListEnd(); } @@ -163472,9 +165707,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1396 : struct.success) + for (HiveObjectPrivilege _iter1420 : struct.success) { - _iter1396.write(oprot); + _iter1420.write(oprot); } } } @@ -163489,14 +165724,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1397.size); - HiveObjectPrivilege _elem1398; - for (int _i1399 = 0; _i1399 < _list1397.size; ++_i1399) + org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1421.size); + HiveObjectPrivilege _elem1422; + for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) { - _elem1398 = new HiveObjectPrivilege(); - _elem1398.read(iprot); - struct.success.add(_elem1398); + _elem1422 = new HiveObjectPrivilege(); + _elem1422.read(iprot); + struct.success.add(_elem1422); } } struct.setSuccessIsSet(true); @@ -166398,13 +168633,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1400 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1400.size); - String _elem1401; - for (int _i1402 = 0; _i1402 < _list1400.size; ++_i1402) + org.apache.thrift.protocol.TList _list1424 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1424.size); + String _elem1425; + for (int _i1426 = 0; _i1426 < _list1424.size; ++_i1426) { - _elem1401 = iprot.readString(); - struct.group_names.add(_elem1401); + _elem1425 = iprot.readString(); + struct.group_names.add(_elem1425); } iprot.readListEnd(); } @@ -166435,9 +168670,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1403 : struct.group_names) + for (String _iter1427 : struct.group_names) { - oprot.writeString(_iter1403); + oprot.writeString(_iter1427); } oprot.writeListEnd(); } @@ -166474,9 +168709,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1404 : struct.group_names) + for (String _iter1428 : struct.group_names) { - oprot.writeString(_iter1404); + oprot.writeString(_iter1428); } } } @@ -166492,13 +168727,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1405.size); - String _elem1406; - for (int _i1407 = 0; _i1407 < _list1405.size; ++_i1407) + org.apache.thrift.protocol.TList _list1429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1429.size); + String _elem1430; + for (int _i1431 = 0; _i1431 < _list1429.size; ++_i1431) { - _elem1406 = iprot.readString(); - struct.group_names.add(_elem1406); + _elem1430 = iprot.readString(); + struct.group_names.add(_elem1430); } } struct.setGroup_namesIsSet(true); @@ -166901,13 +169136,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1408 = iprot.readListBegin(); - struct.success = new ArrayList(_list1408.size); - String _elem1409; - for (int _i1410 = 0; _i1410 < _list1408.size; ++_i1410) + org.apache.thrift.protocol.TList _list1432 = iprot.readListBegin(); + struct.success = new ArrayList(_list1432.size); + String _elem1433; + for (int _i1434 = 0; _i1434 < _list1432.size; ++_i1434) { - _elem1409 = iprot.readString(); - struct.success.add(_elem1409); + _elem1433 = iprot.readString(); + struct.success.add(_elem1433); } iprot.readListEnd(); } @@ -166942,9 +169177,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1411 : struct.success) + for (String _iter1435 : struct.success) { - oprot.writeString(_iter1411); + oprot.writeString(_iter1435); } oprot.writeListEnd(); } @@ -166983,9 +169218,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1412 : struct.success) + for (String _iter1436 : struct.success) { - oprot.writeString(_iter1412); + oprot.writeString(_iter1436); } } } @@ -167000,13 +169235,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1413.size); - String _elem1414; - for (int _i1415 = 0; _i1415 < _list1413.size; ++_i1415) + org.apache.thrift.protocol.TList _list1437 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1437.size); + String _elem1438; + for (int _i1439 = 0; _i1439 < _list1437.size; ++_i1439) { - _elem1414 = iprot.readString(); - struct.success.add(_elem1414); + _elem1438 = iprot.readString(); + struct.success.add(_elem1438); } } struct.setSuccessIsSet(true); @@ 
-172297,13 +174532,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1416 = iprot.readListBegin(); - struct.success = new ArrayList(_list1416.size); - String _elem1417; - for (int _i1418 = 0; _i1418 < _list1416.size; ++_i1418) + org.apache.thrift.protocol.TList _list1440 = iprot.readListBegin(); + struct.success = new ArrayList(_list1440.size); + String _elem1441; + for (int _i1442 = 0; _i1442 < _list1440.size; ++_i1442) { - _elem1417 = iprot.readString(); - struct.success.add(_elem1417); + _elem1441 = iprot.readString(); + struct.success.add(_elem1441); } iprot.readListEnd(); } @@ -172329,9 +174564,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1419 : struct.success) + for (String _iter1443 : struct.success) { - oprot.writeString(_iter1419); + oprot.writeString(_iter1443); } oprot.writeListEnd(); } @@ -172362,9 +174597,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1420 : struct.success) + for (String _iter1444 : struct.success) { - oprot.writeString(_iter1420); + oprot.writeString(_iter1444); } } } @@ -172376,13 +174611,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1421.size); - String _elem1422; - for (int _i1423 = 0; _i1423 < _list1421.size; ++_i1423) + org.apache.thrift.protocol.TList _list1445 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1445.size); + String _elem1446; + for (int _i1447 = 0; _i1447 < _list1445.size; ++_i1447) { - _elem1422 = iprot.readString(); - struct.success.add(_elem1422); + _elem1446 = iprot.readString(); + struct.success.add(_elem1446); } } struct.setSuccessIsSet(true); @@ -175412,13 +177647,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1424 = iprot.readListBegin(); - struct.success = new ArrayList(_list1424.size); - String _elem1425; - for (int _i1426 = 0; _i1426 < _list1424.size; ++_i1426) + org.apache.thrift.protocol.TList _list1448 = iprot.readListBegin(); + struct.success = new ArrayList(_list1448.size); + String _elem1449; + for (int _i1450 = 0; _i1450 < _list1448.size; ++_i1450) { - _elem1425 = iprot.readString(); - struct.success.add(_elem1425); + _elem1449 = iprot.readString(); + struct.success.add(_elem1449); } iprot.readListEnd(); } @@ -175444,9 +177679,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1427 : struct.success) + for (String _iter1451 : struct.success) { - 
oprot.writeString(_iter1427); + oprot.writeString(_iter1451); } oprot.writeListEnd(); } @@ -175477,9 +177712,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1428 : struct.success) + for (String _iter1452 : struct.success) { - oprot.writeString(_iter1428); + oprot.writeString(_iter1452); } } } @@ -175491,13 +177726,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1429.size); - String _elem1430; - for (int _i1431 = 0; _i1431 < _list1429.size; ++_i1431) + org.apache.thrift.protocol.TList _list1453 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1453.size); + String _elem1454; + for (int _i1455 = 0; _i1455 < _list1453.size; ++_i1455) { - _elem1430 = iprot.readString(); - struct.success.add(_elem1430); + _elem1454 = iprot.readString(); + struct.success.add(_elem1454); } } struct.setSuccessIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index 9c05a18123..dedbcce731 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list794 = iprot.readListBegin(); - struct.pools = new ArrayList(_list794.size); - WMPool _elem795; - for (int _i796 = 0; _i796 < _list794.size; ++_i796) + org.apache.thrift.protocol.TList _list810 = iprot.readListBegin(); + struct.pools = new ArrayList(_list810.size); + WMPool _elem811; + for (int _i812 = 0; _i812 < _list810.size; ++_i812) { - _elem795 = new WMPool(); - _elem795.read(iprot); - struct.pools.add(_elem795); + _elem811 = new WMPool(); + _elem811.read(iprot); + struct.pools.add(_elem811); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list797 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list797.size); - WMMapping _elem798; - for (int _i799 = 0; _i799 < _list797.size; ++_i799) + org.apache.thrift.protocol.TList _list813 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list813.size); + WMMapping _elem814; + for (int _i815 = 0; _i815 < _list813.size; ++_i815) { - _elem798 = new WMMapping(); - _elem798.read(iprot); - struct.mappings.add(_elem798); + _elem814 = new WMMapping(); + _elem814.read(iprot); + struct.mappings.add(_elem814); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list800 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list800.size); - WMTrigger _elem801; - for (int _i802 = 0; _i802 < _list800.size; ++_i802) + org.apache.thrift.protocol.TList _list816 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list816.size); + WMTrigger _elem817; + for (int _i818 = 0; _i818 < _list816.size; ++_i818) { - _elem801 = new WMTrigger(); - _elem801.read(iprot); - struct.triggers.add(_elem801); + _elem817 = new WMTrigger(); + _elem817.read(iprot); + struct.triggers.add(_elem817); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list803 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list803.size); - WMPoolTrigger _elem804; - for (int _i805 = 0; _i805 < _list803.size; ++_i805) + org.apache.thrift.protocol.TList _list819 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list819.size); + WMPoolTrigger _elem820; + for (int _i821 = 0; _i821 < _list819.size; ++_i821) { - _elem804 = new WMPoolTrigger(); - _elem804.read(iprot); - struct.poolTriggers.add(_elem804); + _elem820 = new WMPoolTrigger(); + _elem820.read(iprot); + struct.poolTriggers.add(_elem820); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter806 : struct.pools) + for (WMPool _iter822 : struct.pools) { - _iter806.write(oprot); + _iter822.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter807 : struct.mappings) + for (WMMapping _iter823 : struct.mappings) { - _iter807.write(oprot); + _iter823.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter808 : struct.triggers) + for (WMTrigger _iter824 : struct.triggers) { - _iter808.write(oprot); + _iter824.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter809 : struct.poolTriggers) + for (WMPoolTrigger _iter825 : struct.poolTriggers) { - _iter809.write(oprot); + _iter825.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter810 : struct.pools) + for (WMPool _iter826 : struct.pools) { - _iter810.write(oprot); + _iter826.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void 
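The write-side hunks keep the same list framing throughout: the standard scheme emits writeListBegin(elementType, size) then each element, while the tuple variant (the writeI32(struct.pools.size()) blocks) drops the type byte and keeps only the count. A plain-JDK sketch of the standard framing and the matching preallocating reader (constants are stand-ins):

import java.io.*;
import java.util.List;

public class ListFramingSketch {
  static final byte TYPE_STRUCT_LIKE = 12; // stand-in for TType.STRUCT

  static void writeList(DataOutputStream out, List<String> pools) throws IOException {
    out.writeByte(TYPE_STRUCT_LIKE);              // element type header
    out.writeInt(pools.size());                   // element count
    for (String pool : pools) out.writeUTF(pool); // each _iter writes itself
  }

  static void readList(DataInputStream in) throws IOException {
    byte elemType = in.readByte();
    int size = in.readInt();                      // lets readers preallocate ArrayList(size)
    System.out.println("elem type " + elemType + ", " + size + " items:");
    for (int i = 0; i < size; ++i) System.out.println(in.readUTF());
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    writeList(new DataOutputStream(buf), List.of("default", "llap"));
    readList(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
  }
}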
write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter811 : struct.mappings) + for (WMMapping _iter827 : struct.mappings) { - _iter811.write(oprot); + _iter827.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter812 : struct.triggers) + for (WMTrigger _iter828 : struct.triggers) { - _iter812.write(oprot); + _iter828.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter813 : struct.poolTriggers) + for (WMPoolTrigger _iter829 : struct.poolTriggers) { - _iter813.write(oprot); + _iter829.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list814 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list814.size); - WMPool _elem815; - for (int _i816 = 0; _i816 < _list814.size; ++_i816) + org.apache.thrift.protocol.TList _list830 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list830.size); + WMPool _elem831; + for (int _i832 = 0; _i832 < _list830.size; ++_i832) { - _elem815 = new WMPool(); - _elem815.read(iprot); - struct.pools.add(_elem815); + _elem831 = new WMPool(); + _elem831.read(iprot); + struct.pools.add(_elem831); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list817.size); - WMMapping _elem818; - for (int _i819 = 0; _i819 < _list817.size; ++_i819) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list833.size); + WMMapping _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem818 = new WMMapping(); - _elem818.read(iprot); - struct.mappings.add(_elem818); + _elem834 = new WMMapping(); + _elem834.read(iprot); + struct.mappings.add(_elem834); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list820 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list820.size); - WMTrigger _elem821; - for (int _i822 = 0; _i822 < _list820.size; ++_i822) + org.apache.thrift.protocol.TList _list836 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list836.size); + WMTrigger _elem837; + for (int _i838 = 0; _i838 < _list836.size; ++_i838) { - _elem821 = new WMTrigger(); - _elem821.read(iprot); - struct.triggers.add(_elem821); + _elem837 = new WMTrigger(); + _elem837.read(iprot); + struct.triggers.add(_elem837); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list823 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list823.size); - WMPoolTrigger _elem824; - for (int _i825 = 0; _i825 < _list823.size; ++_i825) + org.apache.thrift.protocol.TList 
_list839 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list839.size); + WMPoolTrigger _elem840; + for (int _i841 = 0; _i841 < _list839.size; ++_i841) { - _elem824 = new WMPoolTrigger(); - _elem824.read(iprot); - struct.poolTriggers.add(_elem824); + _elem840 = new WMPoolTrigger(); + _elem840.read(iprot); + struct.poolTriggers.add(_elem840); } } struct.setPoolTriggersIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index ba44e3ac21..59bf429afd 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list826 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list826.size); - WMResourcePlan _elem827; - for (int _i828 = 0; _i828 < _list826.size; ++_i828) + org.apache.thrift.protocol.TList _list842 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list842.size); + WMResourcePlan _elem843; + for (int _i844 = 0; _i844 < _list842.size; ++_i844) { - _elem827 = new WMResourcePlan(); - _elem827.read(iprot); - struct.resourcePlans.add(_elem827); + _elem843 = new WMResourcePlan(); + _elem843.read(iprot); + struct.resourcePlans.add(_elem843); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter829 : struct.resourcePlans) + for (WMResourcePlan _iter845 : struct.resourcePlans) { - _iter829.write(oprot); + _iter845.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter830 : struct.resourcePlans) + for (WMResourcePlan _iter846 : struct.resourcePlans) { - _iter830.write(oprot); + _iter846.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list831 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list831.size); - WMResourcePlan _elem832; - for (int _i833 = 0; _i833 < _list831.size; ++_i833) + org.apache.thrift.protocol.TList _list847 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list847.size); + WMResourcePlan _elem848; + for (int _i849 = 0; _i849 < _list847.size; ++_i849) { - _elem832 = new WMResourcePlan(); - _elem832.read(iprot); - struct.resourcePlans.add(_elem832); + _elem848 = new WMResourcePlan(); + _elem848.read(iprot); + 
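WMFullResourcePlan's tuple codec also shows the required-versus-optional split: plan and pools are written unconditionally with no presence flag, and only the three optional lists (mappings, triggers, poolTriggers) sit behind the BitSet(3). A compact sketch of that ordering with stand-in string fields instead of the WM structs:

import java.io.*;
import java.util.BitSet;
import java.util.List;

public class RequiredThenOptionalSketch {
  static byte[] encode(String plan, List<String> pools, List<String> triggers) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeUTF(plan);                          // required: always written
    out.writeInt(pools.size());                  // required list: count + items
    for (String p : pools) out.writeUTF(p);
    BitSet optionals = new BitSet(3);
    if (triggers != null) optionals.set(1);      // bit 1 <-> triggers, as above
    out.writeByte(optionals.isEmpty() ? 0 : optionals.toByteArray()[0]);
    if (triggers != null) {
      out.writeInt(triggers.size());
      for (String t : triggers) out.writeUTF(t);
    }
    return buf.toByteArray();
  }

  static void decode(byte[] wire) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(wire));
    System.out.println("plan = " + in.readUTF());           // unconditional read
    int pools = in.readInt();
    for (int i = 0; i < pools; ++i) in.readUTF();
    BitSet incoming = BitSet.valueOf(new byte[] { in.readByte() });
    if (incoming.get(1)) {                                  // triggers present?
      int n = in.readInt();
      for (int i = 0; i < n; ++i) System.out.println("trigger = " + in.readUTF());
    }
  }

  public static void main(String[] args) throws IOException {
    decode(encode("rp1", List.of("default"), List.of("highMem")));
  }
}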
struct.resourcePlans.add(_elem848); } } struct.setResourcePlansIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index edec382d19..5c6d680186 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list850 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list850.size); - WMTrigger _elem851; - for (int _i852 = 0; _i852 < _list850.size; ++_i852) + org.apache.thrift.protocol.TList _list866 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list866.size); + WMTrigger _elem867; + for (int _i868 = 0; _i868 < _list866.size; ++_i868) { - _elem851 = new WMTrigger(); - _elem851.read(iprot); - struct.triggers.add(_elem851); + _elem867 = new WMTrigger(); + _elem867.read(iprot); + struct.triggers.add(_elem867); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter853 : struct.triggers) + for (WMTrigger _iter869 : struct.triggers) { - _iter853.write(oprot); + _iter869.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter854 : struct.triggers) + for (WMTrigger _iter870 : struct.triggers) { - _iter854.write(oprot); + _iter870.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list855 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list855.size); - WMTrigger _elem856; - for (int _i857 = 0; _i857 < _list855.size; ++_i857) + org.apache.thrift.protocol.TList _list871 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list871.size); + WMTrigger _elem872; + for (int _i873 = 0; _i873 < _list871.size; ++_i873) { - _elem856 = new WMTrigger(); - _elem856.read(iprot); - struct.triggers.add(_elem856); + _elem872 = new WMTrigger(); + _elem872.read(iprot); + struct.triggers.add(_elem872); } } struct.setTriggersIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index 228f37f725..dba307a1ae 100644 --- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list834 = iprot.readListBegin(); - struct.errors = new ArrayList(_list834.size); - String _elem835; - for (int _i836 = 0; _i836 < _list834.size; ++_i836) + org.apache.thrift.protocol.TList _list850 = iprot.readListBegin(); + struct.errors = new ArrayList(_list850.size); + String _elem851; + for (int _i852 = 0; _i852 < _list850.size; ++_i852) { - _elem835 = iprot.readString(); - struct.errors.add(_elem835); + _elem851 = iprot.readString(); + struct.errors.add(_elem851); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list837 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list837.size); - String _elem838; - for (int _i839 = 0; _i839 < _list837.size; ++_i839) + org.apache.thrift.protocol.TList _list853 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list853.size); + String _elem854; + for (int _i855 = 0; _i855 < _list853.size; ++_i855) { - _elem838 = iprot.readString(); - struct.warnings.add(_elem838); + _elem854 = iprot.readString(); + struct.warnings.add(_elem854); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter840 : struct.errors) + for (String _iter856 : struct.errors) { - oprot.writeString(_iter840); + oprot.writeString(_iter856); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter841 : struct.warnings) + for (String _iter857 : struct.warnings) { - oprot.writeString(_iter841); + oprot.writeString(_iter857); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter842 : struct.errors) + for (String _iter858 : struct.errors) { - oprot.writeString(_iter842); + oprot.writeString(_iter858); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter843 : struct.warnings) + for (String _iter859 : struct.warnings) { - oprot.writeString(_iter843); + oprot.writeString(_iter859); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list844 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list844.size); - String _elem845; - for (int _i846 = 0; _i846 < _list844.size; ++_i846) + org.apache.thrift.protocol.TList _list860 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list860.size); + String _elem861; + for (int _i862 = 0; _i862 < _list860.size; ++_i862) { - _elem845 = iprot.readString(); - struct.errors.add(_elem845); + _elem861 = iprot.readString(); + struct.errors.add(_elem861); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list847 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list847.size); - String _elem848; - for (int _i849 = 0; _i849 < _list847.size; ++_i849) + org.apache.thrift.protocol.TList _list863 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list863.size); + String _elem864; + for (int _i865 = 0; _i865 < _list863.size; ++_i865) { - _elem848 = iprot.readString(); - struct.warnings.add(_elem848); + _elem864 = iprot.readString(); + struct.warnings.add(_elem864); } } struct.setWarningsIsSet(true); diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index d063de853c..57b6bfa110 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -162,12 +162,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @param \metastore\SQLForeignKey[] $foreignKeys * @param \metastore\SQLUniqueConstraint[] $uniqueConstraints * @param \metastore\SQLNotNullConstraint[] $notNullConstraints + * @param \metastore\SQLDefaultConstraint[] $defaultConstraints * @throws \metastore\AlreadyExistsException * @throws \metastore\InvalidObjectException * @throws \metastore\MetaException * @throws \metastore\NoSuchObjectException */ - public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints); + public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints); /** * @param \metastore\DropConstraintRequest $req * @throws \metastore\NoSuchObjectException @@ -198,6 +199,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\MetaException */ public function add_not_null_constraint(\metastore\AddNotNullConstraintRequest $req); + /** + * @param \metastore\AddDefaultConstraintRequest $req + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function add_default_constraint(\metastore\AddDefaultConstraintRequest $req); /** * @param string $dbname * @param string $name @@ -827,6 +834,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\NoSuchObjectException */ public function get_not_null_constraints(\metastore\NotNullConstraintsRequest $request); + /** + * @param \metastore\DefaultConstraintsRequest $request + * @return \metastore\DefaultConstraintsResponse + * @throws \metastore\MetaException + * @throws \metastore\NoSuchObjectException + */ + public function get_default_constraints(\metastore\DefaultConstraintsRequest $request); /** * @param \metastore\ColumnStatistics $stats_obj * @return bool @@ -2463,13 +2477,13 @@ class ThriftHiveMetastoreClient extends 
\FacebookServiceClient implements \metas return; } - public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints) + public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints) { - $this->send_create_table_with_constraints($tbl, $primaryKeys, $foreignKeys, $uniqueConstraints, $notNullConstraints); + $this->send_create_table_with_constraints($tbl, $primaryKeys, $foreignKeys, $uniqueConstraints, $notNullConstraints, $defaultConstraints); $this->recv_create_table_with_constraints(); } - public function send_create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints) + public function send_create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints) { $args = new \metastore\ThriftHiveMetastore_create_table_with_constraints_args(); $args->tbl = $tbl; @@ -2477,6 +2491,7 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas $args->foreignKeys = $foreignKeys; $args->uniqueConstraints = $uniqueConstraints; $args->notNullConstraints = $notNullConstraints; + $args->defaultConstraints = $defaultConstraints; $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); if ($bin_accel) { @@ -2797,6 +2812,60 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function add_default_constraint(\metastore\AddDefaultConstraintRequest $req) + { + $this->send_add_default_constraint($req); + $this->recv_add_default_constraint(); + } + + public function send_add_default_constraint(\metastore\AddDefaultConstraintRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_add_default_constraint_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'add_default_constraint', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('add_default_constraint', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_add_default_constraint() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_default_constraint_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_add_default_constraint_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + return; + } + public function drop_table($dbname, $name, 
$deleteData) { $this->send_drop_table($dbname, $name, $deleteData); @@ -6974,6 +7043,63 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_not_null_constraints failed: unknown result"); } + public function get_default_constraints(\metastore\DefaultConstraintsRequest $request) + { + $this->send_get_default_constraints($request); + return $this->recv_get_default_constraints(); + } + + public function send_get_default_constraints(\metastore\DefaultConstraintsRequest $request) + { + $args = new \metastore\ThriftHiveMetastore_get_default_constraints_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_default_constraints', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_default_constraints', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_default_constraints() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_default_constraints_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_default_constraints_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_default_constraints failed: unknown result"); + } + public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj) { $this->send_update_table_column_statistics($stats_obj); @@ -13064,14 +13190,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size759 = 0; - $_etype762 = 0; - $xfer += $input->readListBegin($_etype762, $_size759); - for ($_i763 = 0; $_i763 < $_size759; ++$_i763) + $_size773 = 0; + $_etype776 = 0; + $xfer += $input->readListBegin($_etype776, $_size773); + for ($_i777 = 0; $_i777 < $_size773; ++$_i777) { - $elem764 = null; - $xfer += $input->readString($elem764); - $this->success []= $elem764; + $elem778 = null; + $xfer += $input->readString($elem778); + $this->success []= $elem778; } $xfer += $input->readListEnd(); } else { @@ -13107,9 +13233,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter765) + foreach ($this->success as $iter779) { - $xfer += $output->writeString($iter765); + $xfer += $output->writeString($iter779); } } $output->writeListEnd(); @@ -13240,14 +13366,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size766 = 0; - $_etype769 = 0; - $xfer += $input->readListBegin($_etype769, 
$_size766); - for ($_i770 = 0; $_i770 < $_size766; ++$_i770) + $_size780 = 0; + $_etype783 = 0; + $xfer += $input->readListBegin($_etype783, $_size780); + for ($_i784 = 0; $_i784 < $_size780; ++$_i784) { - $elem771 = null; - $xfer += $input->readString($elem771); - $this->success []= $elem771; + $elem785 = null; + $xfer += $input->readString($elem785); + $this->success []= $elem785; } $xfer += $input->readListEnd(); } else { @@ -13283,9 +13409,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter772) + foreach ($this->success as $iter786) { - $xfer += $output->writeString($iter772); + $xfer += $output->writeString($iter786); } } $output->writeListEnd(); @@ -14286,18 +14412,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size773 = 0; - $_ktype774 = 0; - $_vtype775 = 0; - $xfer += $input->readMapBegin($_ktype774, $_vtype775, $_size773); - for ($_i777 = 0; $_i777 < $_size773; ++$_i777) + $_size787 = 0; + $_ktype788 = 0; + $_vtype789 = 0; + $xfer += $input->readMapBegin($_ktype788, $_vtype789, $_size787); + for ($_i791 = 0; $_i791 < $_size787; ++$_i791) { - $key778 = ''; - $val779 = new \metastore\Type(); - $xfer += $input->readString($key778); - $val779 = new \metastore\Type(); - $xfer += $val779->read($input); - $this->success[$key778] = $val779; + $key792 = ''; + $val793 = new \metastore\Type(); + $xfer += $input->readString($key792); + $val793 = new \metastore\Type(); + $xfer += $val793->read($input); + $this->success[$key792] = $val793; } $xfer += $input->readMapEnd(); } else { @@ -14333,10 +14459,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter780 => $viter781) + foreach ($this->success as $kiter794 => $viter795) { - $xfer += $output->writeString($kiter780); - $xfer += $viter781->write($output); + $xfer += $output->writeString($kiter794); + $xfer += $viter795->write($output); } } $output->writeMapEnd(); @@ -14540,15 +14666,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size782 = 0; - $_etype785 = 0; - $xfer += $input->readListBegin($_etype785, $_size782); - for ($_i786 = 0; $_i786 < $_size782; ++$_i786) + $_size796 = 0; + $_etype799 = 0; + $xfer += $input->readListBegin($_etype799, $_size796); + for ($_i800 = 0; $_i800 < $_size796; ++$_i800) { - $elem787 = null; - $elem787 = new \metastore\FieldSchema(); - $xfer += $elem787->read($input); - $this->success []= $elem787; + $elem801 = null; + $elem801 = new \metastore\FieldSchema(); + $xfer += $elem801->read($input); + $this->success []= $elem801; } $xfer += $input->readListEnd(); } else { @@ -14600,9 +14726,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter788) + foreach ($this->success as $iter802) { - $xfer += $iter788->write($output); + $xfer += $iter802->write($output); } } $output->writeListEnd(); @@ -14844,15 +14970,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size789 = 0; - $_etype792 = 0; - $xfer += $input->readListBegin($_etype792, $_size789); - for ($_i793 = 0; $_i793 < $_size789; ++$_i793) + $_size803 = 0; + $_etype806 = 0; + $xfer += $input->readListBegin($_etype806, 
$_size803); + for ($_i807 = 0; $_i807 < $_size803; ++$_i807) { - $elem794 = null; - $elem794 = new \metastore\FieldSchema(); - $xfer += $elem794->read($input); - $this->success []= $elem794; + $elem808 = null; + $elem808 = new \metastore\FieldSchema(); + $xfer += $elem808->read($input); + $this->success []= $elem808; } $xfer += $input->readListEnd(); } else { @@ -14904,9 +15030,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter795) + foreach ($this->success as $iter809) { - $xfer += $iter795->write($output); + $xfer += $iter809->write($output); } } $output->writeListEnd(); @@ -15120,15 +15246,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size796 = 0; - $_etype799 = 0; - $xfer += $input->readListBegin($_etype799, $_size796); - for ($_i800 = 0; $_i800 < $_size796; ++$_i800) + $_size810 = 0; + $_etype813 = 0; + $xfer += $input->readListBegin($_etype813, $_size810); + for ($_i814 = 0; $_i814 < $_size810; ++$_i814) { - $elem801 = null; - $elem801 = new \metastore\FieldSchema(); - $xfer += $elem801->read($input); - $this->success []= $elem801; + $elem815 = null; + $elem815 = new \metastore\FieldSchema(); + $xfer += $elem815->read($input); + $this->success []= $elem815; } $xfer += $input->readListEnd(); } else { @@ -15180,9 +15306,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter802) + foreach ($this->success as $iter816) { - $xfer += $iter802->write($output); + $xfer += $iter816->write($output); } } $output->writeListEnd(); @@ -15424,15 +15550,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size803 = 0; - $_etype806 = 0; - $xfer += $input->readListBegin($_etype806, $_size803); - for ($_i807 = 0; $_i807 < $_size803; ++$_i807) + $_size817 = 0; + $_etype820 = 0; + $xfer += $input->readListBegin($_etype820, $_size817); + for ($_i821 = 0; $_i821 < $_size817; ++$_i821) { - $elem808 = null; - $elem808 = new \metastore\FieldSchema(); - $xfer += $elem808->read($input); - $this->success []= $elem808; + $elem822 = null; + $elem822 = new \metastore\FieldSchema(); + $xfer += $elem822->read($input); + $this->success []= $elem822; } $xfer += $input->readListEnd(); } else { @@ -15484,9 +15610,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter809) + foreach ($this->success as $iter823) { - $xfer += $iter809->write($output); + $xfer += $iter823->write($output); } } $output->writeListEnd(); @@ -16030,6 +16156,10 @@ class ThriftHiveMetastore_create_table_with_constraints_args { * @var \metastore\SQLNotNullConstraint[] */ public $notNullConstraints = null; + /** + * @var \metastore\SQLDefaultConstraint[] + */ + public $defaultConstraints = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -16075,6 +16205,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { 'class' => '\metastore\SQLNotNullConstraint', ), ), + 6 => array( + 'var' => 'defaultConstraints', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\SQLDefaultConstraint', + ), + ), ); } if (is_array($vals)) { @@ -16093,6 +16232,9 @@ 
class ThriftHiveMetastore_create_table_with_constraints_args { if (isset($vals['notNullConstraints'])) { $this->notNullConstraints = $vals['notNullConstraints']; } + if (isset($vals['defaultConstraints'])) { + $this->defaultConstraints = $vals['defaultConstraints']; + } } } @@ -16126,15 +16268,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size810 = 0; - $_etype813 = 0; - $xfer += $input->readListBegin($_etype813, $_size810); - for ($_i814 = 0; $_i814 < $_size810; ++$_i814) + $_size824 = 0; + $_etype827 = 0; + $xfer += $input->readListBegin($_etype827, $_size824); + for ($_i828 = 0; $_i828 < $_size824; ++$_i828) { - $elem815 = null; - $elem815 = new \metastore\SQLPrimaryKey(); - $xfer += $elem815->read($input); - $this->primaryKeys []= $elem815; + $elem829 = null; + $elem829 = new \metastore\SQLPrimaryKey(); + $xfer += $elem829->read($input); + $this->primaryKeys []= $elem829; } $xfer += $input->readListEnd(); } else { @@ -16144,15 +16286,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size816 = 0; - $_etype819 = 0; - $xfer += $input->readListBegin($_etype819, $_size816); - for ($_i820 = 0; $_i820 < $_size816; ++$_i820) + $_size830 = 0; + $_etype833 = 0; + $xfer += $input->readListBegin($_etype833, $_size830); + for ($_i834 = 0; $_i834 < $_size830; ++$_i834) { - $elem821 = null; - $elem821 = new \metastore\SQLForeignKey(); - $xfer += $elem821->read($input); - $this->foreignKeys []= $elem821; + $elem835 = null; + $elem835 = new \metastore\SQLForeignKey(); + $xfer += $elem835->read($input); + $this->foreignKeys []= $elem835; } $xfer += $input->readListEnd(); } else { @@ -16162,15 +16304,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size822 = 0; - $_etype825 = 0; - $xfer += $input->readListBegin($_etype825, $_size822); - for ($_i826 = 0; $_i826 < $_size822; ++$_i826) + $_size836 = 0; + $_etype839 = 0; + $xfer += $input->readListBegin($_etype839, $_size836); + for ($_i840 = 0; $_i840 < $_size836; ++$_i840) { - $elem827 = null; - $elem827 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem827->read($input); - $this->uniqueConstraints []= $elem827; + $elem841 = null; + $elem841 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem841->read($input); + $this->uniqueConstraints []= $elem841; } $xfer += $input->readListEnd(); } else { @@ -16180,15 +16322,33 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size828 = 0; - $_etype831 = 0; - $xfer += $input->readListBegin($_etype831, $_size828); - for ($_i832 = 0; $_i832 < $_size828; ++$_i832) + $_size842 = 0; + $_etype845 = 0; + $xfer += $input->readListBegin($_etype845, $_size842); + for ($_i846 = 0; $_i846 < $_size842; ++$_i846) { - $elem833 = null; - $elem833 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem833->read($input); - $this->notNullConstraints []= $elem833; + $elem847 = null; + $elem847 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem847->read($input); + $this->notNullConstraints []= $elem847; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::LST) { + $this->defaultConstraints = array(); + $_size848 = 0; + $_etype851 = 0; + $xfer += $input->readListBegin($_etype851, 
$_size848); + for ($_i852 = 0; $_i852 < $_size848; ++$_i852) + { + $elem853 = null; + $elem853 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem853->read($input); + $this->defaultConstraints []= $elem853; } $xfer += $input->readListEnd(); } else { @@ -16224,9 +16384,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter834) + foreach ($this->primaryKeys as $iter854) { - $xfer += $iter834->write($output); + $xfer += $iter854->write($output); } } $output->writeListEnd(); @@ -16241,9 +16401,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter835) + foreach ($this->foreignKeys as $iter855) { - $xfer += $iter835->write($output); + $xfer += $iter855->write($output); } } $output->writeListEnd(); @@ -16258,9 +16418,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter836) + foreach ($this->uniqueConstraints as $iter856) { - $xfer += $iter836->write($output); + $xfer += $iter856->write($output); } } $output->writeListEnd(); @@ -16275,9 +16435,26 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter837) + foreach ($this->notNullConstraints as $iter857) { - $xfer += $iter837->write($output); + $xfer += $iter857->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->defaultConstraints !== null) { + if (!is_array($this->defaultConstraints)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('defaultConstraints', TType::LST, 6); + { + $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); + { + foreach ($this->defaultConstraints as $iter858) + { + $xfer += $iter858->write($output); } } $output->writeListEnd(); @@ -17353,54 +17530,33 @@ class ThriftHiveMetastore_add_not_null_constraint_result { } -class ThriftHiveMetastore_drop_table_args { +class ThriftHiveMetastore_add_default_constraint_args { static $_TSPEC; /** - * @var string - */ - public $dbname = null; - /** - * @var string - */ - public $name = null; - /** - * @var bool + * @var \metastore\AddDefaultConstraintRequest */ - public $deleteData = null; + public $req = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbname', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'name', - 'type' => TType::STRING, - ), - 3 => array( - 'var' => 'deleteData', - 'type' => TType::BOOL, + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\AddDefaultConstraintRequest', ), ); } if (is_array($vals)) { - if (isset($vals['dbname'])) { - $this->dbname = $vals['dbname']; - } - if (isset($vals['name'])) { - $this->name = $vals['name']; - } - if (isset($vals['deleteData'])) { - $this->deleteData = $vals['deleteData']; + if (isset($vals['req'])) { + $this->req = $vals['req']; } } } public function getName() { - return 'ThriftHiveMetastore_drop_table_args'; + return 'ThriftHiveMetastore_add_default_constraint_args'; } public function read($input) @@ -17419,22 +17575,9 @@ class 
ThriftHiveMetastore_drop_table_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbname); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 3: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->deleteData); + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\AddDefaultConstraintRequest(); + $xfer += $this->req->read($input); } else { $xfer += $input->skip($ftype); } @@ -17451,20 +17594,13 @@ class ThriftHiveMetastore_drop_table_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_table_args'); - if ($this->dbname !== null) { - $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1); - $xfer += $output->writeString($this->dbname); - $xfer += $output->writeFieldEnd(); - } - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 2); - $xfer += $output->writeString($this->name); - $xfer += $output->writeFieldEnd(); - } - if ($this->deleteData !== null) { - $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 3); - $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_default_constraint_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -17474,7 +17610,7 @@ class ThriftHiveMetastore_drop_table_args { } -class ThriftHiveMetastore_drop_table_result { +class ThriftHiveMetastore_add_default_constraint_result { static $_TSPEC; /** @@ -17484,7 +17620,7 @@ class ThriftHiveMetastore_drop_table_result { /** * @var \metastore\MetaException */ - public $o3 = null; + public $o2 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17495,7 +17631,7 @@ class ThriftHiveMetastore_drop_table_result { 'class' => '\metastore\NoSuchObjectException', ), 2 => array( - 'var' => 'o3', + 'var' => 'o2', 'type' => TType::STRUCT, 'class' => '\metastore\MetaException', ), @@ -17505,14 +17641,237 @@ class ThriftHiveMetastore_drop_table_result { if (isset($vals['o1'])) { $this->o1 = $vals['o1']; } - if (isset($vals['o3'])) { - $this->o3 = $vals['o3']; + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; } } } public function getName() { - return 'ThriftHiveMetastore_drop_table_result'; + return 'ThriftHiveMetastore_add_default_constraint_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + 
return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_default_constraint_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_table_args { + static $_TSPEC; + + /** + * @var string + */ + public $dbname = null; + /** + * @var string + */ + public $name = null; + /** + * @var bool + */ + public $deleteData = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbname', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'deleteData', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbname'])) { + $this->dbname = $vals['dbname']; + } + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['deleteData'])) { + $this->deleteData = $vals['deleteData']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_table_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->deleteData); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_table_args'); + if ($this->dbname !== null) { + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1); + $xfer += $output->writeString($this->dbname); + $xfer += $output->writeFieldEnd(); + } + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 2); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 3); + $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_table_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => 
array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_table_result'; } public function read($input) @@ -17913,14 +18272,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size838 = 0; - $_etype841 = 0; - $xfer += $input->readListBegin($_etype841, $_size838); - for ($_i842 = 0; $_i842 < $_size838; ++$_i842) + $_size859 = 0; + $_etype862 = 0; + $xfer += $input->readListBegin($_etype862, $_size859); + for ($_i863 = 0; $_i863 < $_size859; ++$_i863) { - $elem843 = null; - $xfer += $input->readString($elem843); - $this->partNames []= $elem843; + $elem864 = null; + $xfer += $input->readString($elem864); + $this->partNames []= $elem864; } $xfer += $input->readListEnd(); } else { @@ -17958,9 +18317,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter844) + foreach ($this->partNames as $iter865) { - $xfer += $output->writeString($iter844); + $xfer += $output->writeString($iter865); } } $output->writeListEnd(); @@ -18211,14 +18570,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size845 = 0; - $_etype848 = 0; - $xfer += $input->readListBegin($_etype848, $_size845); - for ($_i849 = 0; $_i849 < $_size845; ++$_i849) + $_size866 = 0; + $_etype869 = 0; + $xfer += $input->readListBegin($_etype869, $_size866); + for ($_i870 = 0; $_i870 < $_size866; ++$_i870) { - $elem850 = null; - $xfer += $input->readString($elem850); - $this->success []= $elem850; + $elem871 = null; + $xfer += $input->readString($elem871); + $this->success []= $elem871; } $xfer += $input->readListEnd(); } else { @@ -18254,9 +18613,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter851) + foreach ($this->success as $iter872) { - $xfer += $output->writeString($iter851); + $xfer += $output->writeString($iter872); } } $output->writeListEnd(); @@ -18458,14 +18817,14 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size852 = 0; - $_etype855 = 0; - $xfer += $input->readListBegin($_etype855, $_size852); - for ($_i856 = 0; $_i856 < $_size852; ++$_i856) + $_size873 = 0; + $_etype876 = 0; + $xfer += $input->readListBegin($_etype876, $_size873); + for ($_i877 = 0; $_i877 < $_size873; ++$_i877) { - $elem857 = null; - $xfer += $input->readString($elem857); - $this->success []= $elem857; + $elem878 = null; + $xfer += $input->readString($elem878); + $this->success []= $elem878; } $xfer += $input->readListEnd(); } else { @@ -18501,9 +18860,9 @@ class ThriftHiveMetastore_get_tables_by_type_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter858) + foreach ($this->success as $iter879) { - $xfer += $output->writeString($iter858); + $xfer += $output->writeString($iter879); } } $output->writeListEnd(); @@ -18659,14 +19018,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size859 = 0; - $_etype862 = 0; - $xfer += 
$input->readListBegin($_etype862, $_size859); - for ($_i863 = 0; $_i863 < $_size859; ++$_i863) + $_size880 = 0; + $_etype883 = 0; + $xfer += $input->readListBegin($_etype883, $_size880); + for ($_i884 = 0; $_i884 < $_size880; ++$_i884) { - $elem864 = null; - $xfer += $input->readString($elem864); - $this->success []= $elem864; + $elem885 = null; + $xfer += $input->readString($elem885); + $this->success []= $elem885; } $xfer += $input->readListEnd(); } else { @@ -18702,9 +19061,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter865) + foreach ($this->success as $iter886) { - $xfer += $output->writeString($iter865); + $xfer += $output->writeString($iter886); } } $output->writeListEnd(); @@ -18809,14 +19168,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size866 = 0; - $_etype869 = 0; - $xfer += $input->readListBegin($_etype869, $_size866); - for ($_i870 = 0; $_i870 < $_size866; ++$_i870) + $_size887 = 0; + $_etype890 = 0; + $xfer += $input->readListBegin($_etype890, $_size887); + for ($_i891 = 0; $_i891 < $_size887; ++$_i891) { - $elem871 = null; - $xfer += $input->readString($elem871); - $this->tbl_types []= $elem871; + $elem892 = null; + $xfer += $input->readString($elem892); + $this->tbl_types []= $elem892; } $xfer += $input->readListEnd(); } else { @@ -18854,9 +19213,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter872) + foreach ($this->tbl_types as $iter893) { - $xfer += $output->writeString($iter872); + $xfer += $output->writeString($iter893); } } $output->writeListEnd(); @@ -18933,15 +19292,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size873 = 0; - $_etype876 = 0; - $xfer += $input->readListBegin($_etype876, $_size873); - for ($_i877 = 0; $_i877 < $_size873; ++$_i877) + $_size894 = 0; + $_etype897 = 0; + $xfer += $input->readListBegin($_etype897, $_size894); + for ($_i898 = 0; $_i898 < $_size894; ++$_i898) { - $elem878 = null; - $elem878 = new \metastore\TableMeta(); - $xfer += $elem878->read($input); - $this->success []= $elem878; + $elem899 = null; + $elem899 = new \metastore\TableMeta(); + $xfer += $elem899->read($input); + $this->success []= $elem899; } $xfer += $input->readListEnd(); } else { @@ -18977,9 +19336,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter879) + foreach ($this->success as $iter900) { - $xfer += $iter879->write($output); + $xfer += $iter900->write($output); } } $output->writeListEnd(); @@ -19135,14 +19494,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size880 = 0; - $_etype883 = 0; - $xfer += $input->readListBegin($_etype883, $_size880); - for ($_i884 = 0; $_i884 < $_size880; ++$_i884) + $_size901 = 0; + $_etype904 = 0; + $xfer += $input->readListBegin($_etype904, $_size901); + for ($_i905 = 0; $_i905 < $_size901; ++$_i905) { - $elem885 = null; - $xfer += $input->readString($elem885); - $this->success []= $elem885; + $elem906 = null; + $xfer += $input->readString($elem906); + $this->success []= $elem906; } $xfer += $input->readListEnd(); } else { @@ -19178,9 +19537,9 @@ class 
ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter886) + foreach ($this->success as $iter907) { - $xfer += $output->writeString($iter886); + $xfer += $output->writeString($iter907); } } $output->writeListEnd(); @@ -19495,14 +19854,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size887 = 0; - $_etype890 = 0; - $xfer += $input->readListBegin($_etype890, $_size887); - for ($_i891 = 0; $_i891 < $_size887; ++$_i891) + $_size908 = 0; + $_etype911 = 0; + $xfer += $input->readListBegin($_etype911, $_size908); + for ($_i912 = 0; $_i912 < $_size908; ++$_i912) { - $elem892 = null; - $xfer += $input->readString($elem892); - $this->tbl_names []= $elem892; + $elem913 = null; + $xfer += $input->readString($elem913); + $this->tbl_names []= $elem913; } $xfer += $input->readListEnd(); } else { @@ -19535,9 +19894,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter893) + foreach ($this->tbl_names as $iter914) { - $xfer += $output->writeString($iter893); + $xfer += $output->writeString($iter914); } } $output->writeListEnd(); @@ -19602,15 +19961,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size894 = 0; - $_etype897 = 0; - $xfer += $input->readListBegin($_etype897, $_size894); - for ($_i898 = 0; $_i898 < $_size894; ++$_i898) + $_size915 = 0; + $_etype918 = 0; + $xfer += $input->readListBegin($_etype918, $_size915); + for ($_i919 = 0; $_i919 < $_size915; ++$_i919) { - $elem899 = null; - $elem899 = new \metastore\Table(); - $xfer += $elem899->read($input); - $this->success []= $elem899; + $elem920 = null; + $elem920 = new \metastore\Table(); + $xfer += $elem920->read($input); + $this->success []= $elem920; } $xfer += $input->readListEnd(); } else { @@ -19638,9 +19997,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter900) + foreach ($this->success as $iter921) { - $xfer += $iter900->write($output); + $xfer += $iter921->write($output); } } $output->writeListEnd(); @@ -20167,14 +20526,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size901 = 0; - $_etype904 = 0; - $xfer += $input->readListBegin($_etype904, $_size901); - for ($_i905 = 0; $_i905 < $_size901; ++$_i905) + $_size922 = 0; + $_etype925 = 0; + $xfer += $input->readListBegin($_etype925, $_size922); + for ($_i926 = 0; $_i926 < $_size922; ++$_i926) { - $elem906 = null; - $xfer += $input->readString($elem906); - $this->tbl_names []= $elem906; + $elem927 = null; + $xfer += $input->readString($elem927); + $this->tbl_names []= $elem927; } $xfer += $input->readListEnd(); } else { @@ -20207,9 +20566,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter907) + foreach ($this->tbl_names as $iter928) { - $xfer += $output->writeString($iter907); + $xfer += $output->writeString($iter928); } } $output->writeListEnd(); @@ -20314,18 +20673,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { 
$this->success = array(); - $_size908 = 0; - $_ktype909 = 0; - $_vtype910 = 0; - $xfer += $input->readMapBegin($_ktype909, $_vtype910, $_size908); - for ($_i912 = 0; $_i912 < $_size908; ++$_i912) + $_size929 = 0; + $_ktype930 = 0; + $_vtype931 = 0; + $xfer += $input->readMapBegin($_ktype930, $_vtype931, $_size929); + for ($_i933 = 0; $_i933 < $_size929; ++$_i933) { - $key913 = ''; - $val914 = new \metastore\Materialization(); - $xfer += $input->readString($key913); - $val914 = new \metastore\Materialization(); - $xfer += $val914->read($input); - $this->success[$key913] = $val914; + $key934 = ''; + $val935 = new \metastore\Materialization(); + $xfer += $input->readString($key934); + $val935 = new \metastore\Materialization(); + $xfer += $val935->read($input); + $this->success[$key934] = $val935; } $xfer += $input->readMapEnd(); } else { @@ -20377,10 +20736,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter915 => $viter916) + foreach ($this->success as $kiter936 => $viter937) { - $xfer += $output->writeString($kiter915); - $xfer += $viter916->write($output); + $xfer += $output->writeString($kiter936); + $xfer += $viter937->write($output); } } $output->writeMapEnd(); @@ -20869,14 +21228,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size917 = 0; - $_etype920 = 0; - $xfer += $input->readListBegin($_etype920, $_size917); - for ($_i921 = 0; $_i921 < $_size917; ++$_i921) + $_size938 = 0; + $_etype941 = 0; + $xfer += $input->readListBegin($_etype941, $_size938); + for ($_i942 = 0; $_i942 < $_size938; ++$_i942) { - $elem922 = null; - $xfer += $input->readString($elem922); - $this->success []= $elem922; + $elem943 = null; + $xfer += $input->readString($elem943); + $this->success []= $elem943; } $xfer += $input->readListEnd(); } else { @@ -20928,9 +21287,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter923) + foreach ($this->success as $iter944) { - $xfer += $output->writeString($iter923); + $xfer += $output->writeString($iter944); } } $output->writeListEnd(); @@ -22243,15 +22602,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size924 = 0; - $_etype927 = 0; - $xfer += $input->readListBegin($_etype927, $_size924); - for ($_i928 = 0; $_i928 < $_size924; ++$_i928) + $_size945 = 0; + $_etype948 = 0; + $xfer += $input->readListBegin($_etype948, $_size945); + for ($_i949 = 0; $_i949 < $_size945; ++$_i949) { - $elem929 = null; - $elem929 = new \metastore\Partition(); - $xfer += $elem929->read($input); - $this->new_parts []= $elem929; + $elem950 = null; + $elem950 = new \metastore\Partition(); + $xfer += $elem950->read($input); + $this->new_parts []= $elem950; } $xfer += $input->readListEnd(); } else { @@ -22279,9 +22638,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter930) + foreach ($this->new_parts as $iter951) { - $xfer += $iter930->write($output); + $xfer += $iter951->write($output); } } $output->writeListEnd(); @@ -22496,15 +22855,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size931 = 0; - 
$_etype934 = 0; - $xfer += $input->readListBegin($_etype934, $_size931); - for ($_i935 = 0; $_i935 < $_size931; ++$_i935) + $_size952 = 0; + $_etype955 = 0; + $xfer += $input->readListBegin($_etype955, $_size952); + for ($_i956 = 0; $_i956 < $_size952; ++$_i956) { - $elem936 = null; - $elem936 = new \metastore\PartitionSpec(); - $xfer += $elem936->read($input); - $this->new_parts []= $elem936; + $elem957 = null; + $elem957 = new \metastore\PartitionSpec(); + $xfer += $elem957->read($input); + $this->new_parts []= $elem957; } $xfer += $input->readListEnd(); } else { @@ -22532,9 +22891,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter937) + foreach ($this->new_parts as $iter958) { - $xfer += $iter937->write($output); + $xfer += $iter958->write($output); } } $output->writeListEnd(); @@ -22784,14 +23143,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size938 = 0; - $_etype941 = 0; - $xfer += $input->readListBegin($_etype941, $_size938); - for ($_i942 = 0; $_i942 < $_size938; ++$_i942) + $_size959 = 0; + $_etype962 = 0; + $xfer += $input->readListBegin($_etype962, $_size959); + for ($_i963 = 0; $_i963 < $_size959; ++$_i963) { - $elem943 = null; - $xfer += $input->readString($elem943); - $this->part_vals []= $elem943; + $elem964 = null; + $xfer += $input->readString($elem964); + $this->part_vals []= $elem964; } $xfer += $input->readListEnd(); } else { @@ -22829,9 +23188,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter944) + foreach ($this->part_vals as $iter965) { - $xfer += $output->writeString($iter944); + $xfer += $output->writeString($iter965); } } $output->writeListEnd(); @@ -23333,14 +23692,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size945 = 0; - $_etype948 = 0; - $xfer += $input->readListBegin($_etype948, $_size945); - for ($_i949 = 0; $_i949 < $_size945; ++$_i949) + $_size966 = 0; + $_etype969 = 0; + $xfer += $input->readListBegin($_etype969, $_size966); + for ($_i970 = 0; $_i970 < $_size966; ++$_i970) { - $elem950 = null; - $xfer += $input->readString($elem950); - $this->part_vals []= $elem950; + $elem971 = null; + $xfer += $input->readString($elem971); + $this->part_vals []= $elem971; } $xfer += $input->readListEnd(); } else { @@ -23386,9 +23745,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter951) + foreach ($this->part_vals as $iter972) { - $xfer += $output->writeString($iter951); + $xfer += $output->writeString($iter972); } } $output->writeListEnd(); @@ -24242,14 +24601,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size952 = 0; - $_etype955 = 0; - $xfer += $input->readListBegin($_etype955, $_size952); - for ($_i956 = 0; $_i956 < $_size952; ++$_i956) + $_size973 = 0; + $_etype976 = 0; + $xfer += $input->readListBegin($_etype976, $_size973); + for ($_i977 = 0; $_i977 < $_size973; ++$_i977) { - $elem957 = null; - $xfer += $input->readString($elem957); - $this->part_vals []= $elem957; + $elem978 = null; + $xfer += $input->readString($elem978); + $this->part_vals []= $elem978; 
} $xfer += $input->readListEnd(); } else { @@ -24294,9 +24653,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter958) + foreach ($this->part_vals as $iter979) { - $xfer += $output->writeString($iter958); + $xfer += $output->writeString($iter979); } } $output->writeListEnd(); @@ -24549,14 +24908,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size959 = 0; - $_etype962 = 0; - $xfer += $input->readListBegin($_etype962, $_size959); - for ($_i963 = 0; $_i963 < $_size959; ++$_i963) + $_size980 = 0; + $_etype983 = 0; + $xfer += $input->readListBegin($_etype983, $_size980); + for ($_i984 = 0; $_i984 < $_size980; ++$_i984) { - $elem964 = null; - $xfer += $input->readString($elem964); - $this->part_vals []= $elem964; + $elem985 = null; + $xfer += $input->readString($elem985); + $this->part_vals []= $elem985; } $xfer += $input->readListEnd(); } else { @@ -24609,9 +24968,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter965) + foreach ($this->part_vals as $iter986) { - $xfer += $output->writeString($iter965); + $xfer += $output->writeString($iter986); } } $output->writeListEnd(); @@ -25625,14 +25984,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size966 = 0; - $_etype969 = 0; - $xfer += $input->readListBegin($_etype969, $_size966); - for ($_i970 = 0; $_i970 < $_size966; ++$_i970) + $_size987 = 0; + $_etype990 = 0; + $xfer += $input->readListBegin($_etype990, $_size987); + for ($_i991 = 0; $_i991 < $_size987; ++$_i991) { - $elem971 = null; - $xfer += $input->readString($elem971); - $this->part_vals []= $elem971; + $elem992 = null; + $xfer += $input->readString($elem992); + $this->part_vals []= $elem992; } $xfer += $input->readListEnd(); } else { @@ -25670,9 +26029,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter972) + foreach ($this->part_vals as $iter993) { - $xfer += $output->writeString($iter972); + $xfer += $output->writeString($iter993); } } $output->writeListEnd(); @@ -25914,17 +26273,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size973 = 0; - $_ktype974 = 0; - $_vtype975 = 0; - $xfer += $input->readMapBegin($_ktype974, $_vtype975, $_size973); - for ($_i977 = 0; $_i977 < $_size973; ++$_i977) + $_size994 = 0; + $_ktype995 = 0; + $_vtype996 = 0; + $xfer += $input->readMapBegin($_ktype995, $_vtype996, $_size994); + for ($_i998 = 0; $_i998 < $_size994; ++$_i998) { - $key978 = ''; - $val979 = ''; - $xfer += $input->readString($key978); - $xfer += $input->readString($val979); - $this->partitionSpecs[$key978] = $val979; + $key999 = ''; + $val1000 = ''; + $xfer += $input->readString($key999); + $xfer += $input->readString($val1000); + $this->partitionSpecs[$key999] = $val1000; } $xfer += $input->readMapEnd(); } else { @@ -25980,10 +26339,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter980 => $viter981) + foreach ($this->partitionSpecs as $kiter1001 => $viter1002) { - 
$xfer += $output->writeString($kiter980); - $xfer += $output->writeString($viter981); + $xfer += $output->writeString($kiter1001); + $xfer += $output->writeString($viter1002); } } $output->writeMapEnd(); @@ -26295,17 +26654,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size982 = 0; - $_ktype983 = 0; - $_vtype984 = 0; - $xfer += $input->readMapBegin($_ktype983, $_vtype984, $_size982); - for ($_i986 = 0; $_i986 < $_size982; ++$_i986) + $_size1003 = 0; + $_ktype1004 = 0; + $_vtype1005 = 0; + $xfer += $input->readMapBegin($_ktype1004, $_vtype1005, $_size1003); + for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007) { - $key987 = ''; - $val988 = ''; - $xfer += $input->readString($key987); - $xfer += $input->readString($val988); - $this->partitionSpecs[$key987] = $val988; + $key1008 = ''; + $val1009 = ''; + $xfer += $input->readString($key1008); + $xfer += $input->readString($val1009); + $this->partitionSpecs[$key1008] = $val1009; } $xfer += $input->readMapEnd(); } else { @@ -26361,10 +26720,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter989 => $viter990) + foreach ($this->partitionSpecs as $kiter1010 => $viter1011) { - $xfer += $output->writeString($kiter989); - $xfer += $output->writeString($viter990); + $xfer += $output->writeString($kiter1010); + $xfer += $output->writeString($viter1011); } } $output->writeMapEnd(); @@ -26497,15 +26856,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size991 = 0; - $_etype994 = 0; - $xfer += $input->readListBegin($_etype994, $_size991); - for ($_i995 = 0; $_i995 < $_size991; ++$_i995) + $_size1012 = 0; + $_etype1015 = 0; + $xfer += $input->readListBegin($_etype1015, $_size1012); + for ($_i1016 = 0; $_i1016 < $_size1012; ++$_i1016) { - $elem996 = null; - $elem996 = new \metastore\Partition(); - $xfer += $elem996->read($input); - $this->success []= $elem996; + $elem1017 = null; + $elem1017 = new \metastore\Partition(); + $xfer += $elem1017->read($input); + $this->success []= $elem1017; } $xfer += $input->readListEnd(); } else { @@ -26565,9 +26924,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter997) + foreach ($this->success as $iter1018) { - $xfer += $iter997->write($output); + $xfer += $iter1018->write($output); } } $output->writeListEnd(); @@ -26713,14 +27072,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size998 = 0; - $_etype1001 = 0; - $xfer += $input->readListBegin($_etype1001, $_size998); - for ($_i1002 = 0; $_i1002 < $_size998; ++$_i1002) + $_size1019 = 0; + $_etype1022 = 0; + $xfer += $input->readListBegin($_etype1022, $_size1019); + for ($_i1023 = 0; $_i1023 < $_size1019; ++$_i1023) { - $elem1003 = null; - $xfer += $input->readString($elem1003); - $this->part_vals []= $elem1003; + $elem1024 = null; + $xfer += $input->readString($elem1024); + $this->part_vals []= $elem1024; } $xfer += $input->readListEnd(); } else { @@ -26737,14 +27096,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1004 = 0; - $_etype1007 = 0; - $xfer += $input->readListBegin($_etype1007, $_size1004); - 
for ($_i1008 = 0; $_i1008 < $_size1004; ++$_i1008) + $_size1025 = 0; + $_etype1028 = 0; + $xfer += $input->readListBegin($_etype1028, $_size1025); + for ($_i1029 = 0; $_i1029 < $_size1025; ++$_i1029) { - $elem1009 = null; - $xfer += $input->readString($elem1009); - $this->group_names []= $elem1009; + $elem1030 = null; + $xfer += $input->readString($elem1030); + $this->group_names []= $elem1030; } $xfer += $input->readListEnd(); } else { @@ -26782,9 +27141,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1010) + foreach ($this->part_vals as $iter1031) { - $xfer += $output->writeString($iter1010); + $xfer += $output->writeString($iter1031); } } $output->writeListEnd(); @@ -26804,9 +27163,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1011) + foreach ($this->group_names as $iter1032) { - $xfer += $output->writeString($iter1011); + $xfer += $output->writeString($iter1032); } } $output->writeListEnd(); @@ -27397,15 +27756,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1012 = 0; - $_etype1015 = 0; - $xfer += $input->readListBegin($_etype1015, $_size1012); - for ($_i1016 = 0; $_i1016 < $_size1012; ++$_i1016) + $_size1033 = 0; + $_etype1036 = 0; + $xfer += $input->readListBegin($_etype1036, $_size1033); + for ($_i1037 = 0; $_i1037 < $_size1033; ++$_i1037) { - $elem1017 = null; - $elem1017 = new \metastore\Partition(); - $xfer += $elem1017->read($input); - $this->success []= $elem1017; + $elem1038 = null; + $elem1038 = new \metastore\Partition(); + $xfer += $elem1038->read($input); + $this->success []= $elem1038; } $xfer += $input->readListEnd(); } else { @@ -27449,9 +27808,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1018) + foreach ($this->success as $iter1039) { - $xfer += $iter1018->write($output); + $xfer += $iter1039->write($output); } } $output->writeListEnd(); @@ -27597,14 +27956,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1019 = 0; - $_etype1022 = 0; - $xfer += $input->readListBegin($_etype1022, $_size1019); - for ($_i1023 = 0; $_i1023 < $_size1019; ++$_i1023) + $_size1040 = 0; + $_etype1043 = 0; + $xfer += $input->readListBegin($_etype1043, $_size1040); + for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) { - $elem1024 = null; - $xfer += $input->readString($elem1024); - $this->group_names []= $elem1024; + $elem1045 = null; + $xfer += $input->readString($elem1045); + $this->group_names []= $elem1045; } $xfer += $input->readListEnd(); } else { @@ -27652,9 +28011,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1025) + foreach ($this->group_names as $iter1046) { - $xfer += $output->writeString($iter1025); + $xfer += $output->writeString($iter1046); } } $output->writeListEnd(); @@ -27743,15 +28102,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1026 = 0; - $_etype1029 = 0; - $xfer += $input->readListBegin($_etype1029, $_size1026); - for ($_i1030 = 0; $_i1030 < 
$_size1026; ++$_i1030) + $_size1047 = 0; + $_etype1050 = 0; + $xfer += $input->readListBegin($_etype1050, $_size1047); + for ($_i1051 = 0; $_i1051 < $_size1047; ++$_i1051) { - $elem1031 = null; - $elem1031 = new \metastore\Partition(); - $xfer += $elem1031->read($input); - $this->success []= $elem1031; + $elem1052 = null; + $elem1052 = new \metastore\Partition(); + $xfer += $elem1052->read($input); + $this->success []= $elem1052; } $xfer += $input->readListEnd(); } else { @@ -27795,9 +28154,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1032) + foreach ($this->success as $iter1053) { - $xfer += $iter1032->write($output); + $xfer += $iter1053->write($output); } } $output->writeListEnd(); @@ -28017,15 +28376,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1033 = 0; - $_etype1036 = 0; - $xfer += $input->readListBegin($_etype1036, $_size1033); - for ($_i1037 = 0; $_i1037 < $_size1033; ++$_i1037) + $_size1054 = 0; + $_etype1057 = 0; + $xfer += $input->readListBegin($_etype1057, $_size1054); + for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058) { - $elem1038 = null; - $elem1038 = new \metastore\PartitionSpec(); - $xfer += $elem1038->read($input); - $this->success []= $elem1038; + $elem1059 = null; + $elem1059 = new \metastore\PartitionSpec(); + $xfer += $elem1059->read($input); + $this->success []= $elem1059; } $xfer += $input->readListEnd(); } else { @@ -28069,9 +28428,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1039) + foreach ($this->success as $iter1060) { - $xfer += $iter1039->write($output); + $xfer += $iter1060->write($output); } } $output->writeListEnd(); @@ -28290,14 +28649,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1040 = 0; - $_etype1043 = 0; - $xfer += $input->readListBegin($_etype1043, $_size1040); - for ($_i1044 = 0; $_i1044 < $_size1040; ++$_i1044) + $_size1061 = 0; + $_etype1064 = 0; + $xfer += $input->readListBegin($_etype1064, $_size1061); + for ($_i1065 = 0; $_i1065 < $_size1061; ++$_i1065) { - $elem1045 = null; - $xfer += $input->readString($elem1045); - $this->success []= $elem1045; + $elem1066 = null; + $xfer += $input->readString($elem1066); + $this->success []= $elem1066; } $xfer += $input->readListEnd(); } else { @@ -28341,9 +28700,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1046) + foreach ($this->success as $iter1067) { - $xfer += $output->writeString($iter1046); + $xfer += $output->writeString($iter1067); } } $output->writeListEnd(); @@ -28674,14 +29033,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1047 = 0; - $_etype1050 = 0; - $xfer += $input->readListBegin($_etype1050, $_size1047); - for ($_i1051 = 0; $_i1051 < $_size1047; ++$_i1051) + $_size1068 = 0; + $_etype1071 = 0; + $xfer += $input->readListBegin($_etype1071, $_size1068); + for ($_i1072 = 0; $_i1072 < $_size1068; ++$_i1072) { - $elem1052 = null; - $xfer += $input->readString($elem1052); - $this->part_vals []= $elem1052; + $elem1073 = null; + $xfer += $input->readString($elem1073); + $this->part_vals []= 
$elem1073; } $xfer += $input->readListEnd(); } else { @@ -28726,9 +29085,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1053) + foreach ($this->part_vals as $iter1074) { - $xfer += $output->writeString($iter1053); + $xfer += $output->writeString($iter1074); } } $output->writeListEnd(); @@ -28822,15 +29181,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1054 = 0; - $_etype1057 = 0; - $xfer += $input->readListBegin($_etype1057, $_size1054); - for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058) + $_size1075 = 0; + $_etype1078 = 0; + $xfer += $input->readListBegin($_etype1078, $_size1075); + for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) { - $elem1059 = null; - $elem1059 = new \metastore\Partition(); - $xfer += $elem1059->read($input); - $this->success []= $elem1059; + $elem1080 = null; + $elem1080 = new \metastore\Partition(); + $xfer += $elem1080->read($input); + $this->success []= $elem1080; } $xfer += $input->readListEnd(); } else { @@ -28874,9 +29233,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1060) + foreach ($this->success as $iter1081) { - $xfer += $iter1060->write($output); + $xfer += $iter1081->write($output); } } $output->writeListEnd(); @@ -29023,14 +29382,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1061 = 0; - $_etype1064 = 0; - $xfer += $input->readListBegin($_etype1064, $_size1061); - for ($_i1065 = 0; $_i1065 < $_size1061; ++$_i1065) + $_size1082 = 0; + $_etype1085 = 0; + $xfer += $input->readListBegin($_etype1085, $_size1082); + for ($_i1086 = 0; $_i1086 < $_size1082; ++$_i1086) { - $elem1066 = null; - $xfer += $input->readString($elem1066); - $this->part_vals []= $elem1066; + $elem1087 = null; + $xfer += $input->readString($elem1087); + $this->part_vals []= $elem1087; } $xfer += $input->readListEnd(); } else { @@ -29054,14 +29413,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1067 = 0; - $_etype1070 = 0; - $xfer += $input->readListBegin($_etype1070, $_size1067); - for ($_i1071 = 0; $_i1071 < $_size1067; ++$_i1071) + $_size1088 = 0; + $_etype1091 = 0; + $xfer += $input->readListBegin($_etype1091, $_size1088); + for ($_i1092 = 0; $_i1092 < $_size1088; ++$_i1092) { - $elem1072 = null; - $xfer += $input->readString($elem1072); - $this->group_names []= $elem1072; + $elem1093 = null; + $xfer += $input->readString($elem1093); + $this->group_names []= $elem1093; } $xfer += $input->readListEnd(); } else { @@ -29099,9 +29458,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1073) + foreach ($this->part_vals as $iter1094) { - $xfer += $output->writeString($iter1073); + $xfer += $output->writeString($iter1094); } } $output->writeListEnd(); @@ -29126,9 +29485,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1074) + foreach ($this->group_names as $iter1095) { - $xfer += $output->writeString($iter1074); + $xfer += $output->writeString($iter1095); } } 
$output->writeListEnd(); @@ -29217,15 +29576,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1075 = 0; - $_etype1078 = 0; - $xfer += $input->readListBegin($_etype1078, $_size1075); - for ($_i1079 = 0; $_i1079 < $_size1075; ++$_i1079) + $_size1096 = 0; + $_etype1099 = 0; + $xfer += $input->readListBegin($_etype1099, $_size1096); + for ($_i1100 = 0; $_i1100 < $_size1096; ++$_i1100) { - $elem1080 = null; - $elem1080 = new \metastore\Partition(); - $xfer += $elem1080->read($input); - $this->success []= $elem1080; + $elem1101 = null; + $elem1101 = new \metastore\Partition(); + $xfer += $elem1101->read($input); + $this->success []= $elem1101; } $xfer += $input->readListEnd(); } else { @@ -29269,9 +29628,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1081) + foreach ($this->success as $iter1102) { - $xfer += $iter1081->write($output); + $xfer += $iter1102->write($output); } } $output->writeListEnd(); @@ -29392,14 +29751,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1082 = 0; - $_etype1085 = 0; - $xfer += $input->readListBegin($_etype1085, $_size1082); - for ($_i1086 = 0; $_i1086 < $_size1082; ++$_i1086) + $_size1103 = 0; + $_etype1106 = 0; + $xfer += $input->readListBegin($_etype1106, $_size1103); + for ($_i1107 = 0; $_i1107 < $_size1103; ++$_i1107) { - $elem1087 = null; - $xfer += $input->readString($elem1087); - $this->part_vals []= $elem1087; + $elem1108 = null; + $xfer += $input->readString($elem1108); + $this->part_vals []= $elem1108; } $xfer += $input->readListEnd(); } else { @@ -29444,9 +29803,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1088) + foreach ($this->part_vals as $iter1109) { - $xfer += $output->writeString($iter1088); + $xfer += $output->writeString($iter1109); } } $output->writeListEnd(); @@ -29539,14 +29898,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1089 = 0; - $_etype1092 = 0; - $xfer += $input->readListBegin($_etype1092, $_size1089); - for ($_i1093 = 0; $_i1093 < $_size1089; ++$_i1093) + $_size1110 = 0; + $_etype1113 = 0; + $xfer += $input->readListBegin($_etype1113, $_size1110); + for ($_i1114 = 0; $_i1114 < $_size1110; ++$_i1114) { - $elem1094 = null; - $xfer += $input->readString($elem1094); - $this->success []= $elem1094; + $elem1115 = null; + $xfer += $input->readString($elem1115); + $this->success []= $elem1115; } $xfer += $input->readListEnd(); } else { @@ -29590,9 +29949,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1095) + foreach ($this->success as $iter1116) { - $xfer += $output->writeString($iter1095); + $xfer += $output->writeString($iter1116); } } $output->writeListEnd(); @@ -29835,15 +30194,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1096 = 0; - $_etype1099 = 0; - $xfer += $input->readListBegin($_etype1099, $_size1096); - for ($_i1100 = 0; $_i1100 < $_size1096; ++$_i1100) + $_size1117 = 0; + $_etype1120 = 0; + $xfer += 
$input->readListBegin($_etype1120, $_size1117); + for ($_i1121 = 0; $_i1121 < $_size1117; ++$_i1121) { - $elem1101 = null; - $elem1101 = new \metastore\Partition(); - $xfer += $elem1101->read($input); - $this->success []= $elem1101; + $elem1122 = null; + $elem1122 = new \metastore\Partition(); + $xfer += $elem1122->read($input); + $this->success []= $elem1122; } $xfer += $input->readListEnd(); } else { @@ -29887,9 +30246,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1102) + foreach ($this->success as $iter1123) { - $xfer += $iter1102->write($output); + $xfer += $iter1123->write($output); } } $output->writeListEnd(); @@ -30132,15 +30491,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1103 = 0; - $_etype1106 = 0; - $xfer += $input->readListBegin($_etype1106, $_size1103); - for ($_i1107 = 0; $_i1107 < $_size1103; ++$_i1107) + $_size1124 = 0; + $_etype1127 = 0; + $xfer += $input->readListBegin($_etype1127, $_size1124); + for ($_i1128 = 0; $_i1128 < $_size1124; ++$_i1128) { - $elem1108 = null; - $elem1108 = new \metastore\PartitionSpec(); - $xfer += $elem1108->read($input); - $this->success []= $elem1108; + $elem1129 = null; + $elem1129 = new \metastore\PartitionSpec(); + $xfer += $elem1129->read($input); + $this->success []= $elem1129; } $xfer += $input->readListEnd(); } else { @@ -30184,9 +30543,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1109) + foreach ($this->success as $iter1130) { - $xfer += $iter1109->write($output); + $xfer += $iter1130->write($output); } } $output->writeListEnd(); @@ -30752,14 +31111,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1110 = 0; - $_etype1113 = 0; - $xfer += $input->readListBegin($_etype1113, $_size1110); - for ($_i1114 = 0; $_i1114 < $_size1110; ++$_i1114) + $_size1131 = 0; + $_etype1134 = 0; + $xfer += $input->readListBegin($_etype1134, $_size1131); + for ($_i1135 = 0; $_i1135 < $_size1131; ++$_i1135) { - $elem1115 = null; - $xfer += $input->readString($elem1115); - $this->names []= $elem1115; + $elem1136 = null; + $xfer += $input->readString($elem1136); + $this->names []= $elem1136; } $xfer += $input->readListEnd(); } else { @@ -30797,9 +31156,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1116) + foreach ($this->names as $iter1137) { - $xfer += $output->writeString($iter1116); + $xfer += $output->writeString($iter1137); } } $output->writeListEnd(); @@ -30888,15 +31247,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1117 = 0; - $_etype1120 = 0; - $xfer += $input->readListBegin($_etype1120, $_size1117); - for ($_i1121 = 0; $_i1121 < $_size1117; ++$_i1121) + $_size1138 = 0; + $_etype1141 = 0; + $xfer += $input->readListBegin($_etype1141, $_size1138); + for ($_i1142 = 0; $_i1142 < $_size1138; ++$_i1142) { - $elem1122 = null; - $elem1122 = new \metastore\Partition(); - $xfer += $elem1122->read($input); - $this->success []= $elem1122; + $elem1143 = null; + $elem1143 = new \metastore\Partition(); + $xfer += $elem1143->read($input); + $this->success []= 
$elem1143; } $xfer += $input->readListEnd(); } else { @@ -30940,9 +31299,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1123) + foreach ($this->success as $iter1144) { - $xfer += $iter1123->write($output); + $xfer += $iter1144->write($output); } } $output->writeListEnd(); @@ -31281,15 +31640,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1124 = 0; - $_etype1127 = 0; - $xfer += $input->readListBegin($_etype1127, $_size1124); - for ($_i1128 = 0; $_i1128 < $_size1124; ++$_i1128) + $_size1145 = 0; + $_etype1148 = 0; + $xfer += $input->readListBegin($_etype1148, $_size1145); + for ($_i1149 = 0; $_i1149 < $_size1145; ++$_i1149) { - $elem1129 = null; - $elem1129 = new \metastore\Partition(); - $xfer += $elem1129->read($input); - $this->new_parts []= $elem1129; + $elem1150 = null; + $elem1150 = new \metastore\Partition(); + $xfer += $elem1150->read($input); + $this->new_parts []= $elem1150; } $xfer += $input->readListEnd(); } else { @@ -31327,9 +31686,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1130) + foreach ($this->new_parts as $iter1151) { - $xfer += $iter1130->write($output); + $xfer += $iter1151->write($output); } } $output->writeListEnd(); @@ -31544,15 +31903,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1131 = 0; - $_etype1134 = 0; - $xfer += $input->readListBegin($_etype1134, $_size1131); - for ($_i1135 = 0; $_i1135 < $_size1131; ++$_i1135) + $_size1152 = 0; + $_etype1155 = 0; + $xfer += $input->readListBegin($_etype1155, $_size1152); + for ($_i1156 = 0; $_i1156 < $_size1152; ++$_i1156) { - $elem1136 = null; - $elem1136 = new \metastore\Partition(); - $xfer += $elem1136->read($input); - $this->new_parts []= $elem1136; + $elem1157 = null; + $elem1157 = new \metastore\Partition(); + $xfer += $elem1157->read($input); + $this->new_parts []= $elem1157; } $xfer += $input->readListEnd(); } else { @@ -31598,9 +31957,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1137) + foreach ($this->new_parts as $iter1158) { - $xfer += $iter1137->write($output); + $xfer += $iter1158->write($output); } } $output->writeListEnd(); @@ -32078,14 +32437,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1138 = 0; - $_etype1141 = 0; - $xfer += $input->readListBegin($_etype1141, $_size1138); - for ($_i1142 = 0; $_i1142 < $_size1138; ++$_i1142) + $_size1159 = 0; + $_etype1162 = 0; + $xfer += $input->readListBegin($_etype1162, $_size1159); + for ($_i1163 = 0; $_i1163 < $_size1159; ++$_i1163) { - $elem1143 = null; - $xfer += $input->readString($elem1143); - $this->part_vals []= $elem1143; + $elem1164 = null; + $xfer += $input->readString($elem1164); + $this->part_vals []= $elem1164; } $xfer += $input->readListEnd(); } else { @@ -32131,9 +32490,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1144) + foreach ($this->part_vals as $iter1165) { - $xfer += $output->writeString($iter1144); + 
$xfer += $output->writeString($iter1165); } } $output->writeListEnd(); @@ -32318,14 +32677,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1145 = 0; - $_etype1148 = 0; - $xfer += $input->readListBegin($_etype1148, $_size1145); - for ($_i1149 = 0; $_i1149 < $_size1145; ++$_i1149) + $_size1166 = 0; + $_etype1169 = 0; + $xfer += $input->readListBegin($_etype1169, $_size1166); + for ($_i1170 = 0; $_i1170 < $_size1166; ++$_i1170) { - $elem1150 = null; - $xfer += $input->readString($elem1150); - $this->part_vals []= $elem1150; + $elem1171 = null; + $xfer += $input->readString($elem1171); + $this->part_vals []= $elem1171; } $xfer += $input->readListEnd(); } else { @@ -32360,9 +32719,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1151) + foreach ($this->part_vals as $iter1172) { - $xfer += $output->writeString($iter1151); + $xfer += $output->writeString($iter1172); } } $output->writeListEnd(); @@ -32816,14 +33175,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1152 = 0; - $_etype1155 = 0; - $xfer += $input->readListBegin($_etype1155, $_size1152); - for ($_i1156 = 0; $_i1156 < $_size1152; ++$_i1156) + $_size1173 = 0; + $_etype1176 = 0; + $xfer += $input->readListBegin($_etype1176, $_size1173); + for ($_i1177 = 0; $_i1177 < $_size1173; ++$_i1177) { - $elem1157 = null; - $xfer += $input->readString($elem1157); - $this->success []= $elem1157; + $elem1178 = null; + $xfer += $input->readString($elem1178); + $this->success []= $elem1178; } $xfer += $input->readListEnd(); } else { @@ -32859,9 +33218,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1158) + foreach ($this->success as $iter1179) { - $xfer += $output->writeString($iter1158); + $xfer += $output->writeString($iter1179); } } $output->writeListEnd(); @@ -33021,17 +33380,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1159 = 0; - $_ktype1160 = 0; - $_vtype1161 = 0; - $xfer += $input->readMapBegin($_ktype1160, $_vtype1161, $_size1159); - for ($_i1163 = 0; $_i1163 < $_size1159; ++$_i1163) + $_size1180 = 0; + $_ktype1181 = 0; + $_vtype1182 = 0; + $xfer += $input->readMapBegin($_ktype1181, $_vtype1182, $_size1180); + for ($_i1184 = 0; $_i1184 < $_size1180; ++$_i1184) { - $key1164 = ''; - $val1165 = ''; - $xfer += $input->readString($key1164); - $xfer += $input->readString($val1165); - $this->success[$key1164] = $val1165; + $key1185 = ''; + $val1186 = ''; + $xfer += $input->readString($key1185); + $xfer += $input->readString($val1186); + $this->success[$key1185] = $val1186; } $xfer += $input->readMapEnd(); } else { @@ -33067,10 +33426,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1166 => $viter1167) + foreach ($this->success as $kiter1187 => $viter1188) { - $xfer += $output->writeString($kiter1166); - $xfer += $output->writeString($viter1167); + $xfer += $output->writeString($kiter1187); + $xfer += $output->writeString($viter1188); } } $output->writeMapEnd(); @@ -33190,17 +33549,17 @@ class 
ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1168 = 0; - $_ktype1169 = 0; - $_vtype1170 = 0; - $xfer += $input->readMapBegin($_ktype1169, $_vtype1170, $_size1168); - for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172) + $_size1189 = 0; + $_ktype1190 = 0; + $_vtype1191 = 0; + $xfer += $input->readMapBegin($_ktype1190, $_vtype1191, $_size1189); + for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193) { - $key1173 = ''; - $val1174 = ''; - $xfer += $input->readString($key1173); - $xfer += $input->readString($val1174); - $this->part_vals[$key1173] = $val1174; + $key1194 = ''; + $val1195 = ''; + $xfer += $input->readString($key1194); + $xfer += $input->readString($val1195); + $this->part_vals[$key1194] = $val1195; } $xfer += $input->readMapEnd(); } else { @@ -33245,10 +33604,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1175 => $viter1176) + foreach ($this->part_vals as $kiter1196 => $viter1197) { - $xfer += $output->writeString($kiter1175); - $xfer += $output->writeString($viter1176); + $xfer += $output->writeString($kiter1196); + $xfer += $output->writeString($viter1197); } } $output->writeMapEnd(); @@ -33570,17 +33929,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1177 = 0; - $_ktype1178 = 0; - $_vtype1179 = 0; - $xfer += $input->readMapBegin($_ktype1178, $_vtype1179, $_size1177); - for ($_i1181 = 0; $_i1181 < $_size1177; ++$_i1181) + $_size1198 = 0; + $_ktype1199 = 0; + $_vtype1200 = 0; + $xfer += $input->readMapBegin($_ktype1199, $_vtype1200, $_size1198); + for ($_i1202 = 0; $_i1202 < $_size1198; ++$_i1202) { - $key1182 = ''; - $val1183 = ''; - $xfer += $input->readString($key1182); - $xfer += $input->readString($val1183); - $this->part_vals[$key1182] = $val1183; + $key1203 = ''; + $val1204 = ''; + $xfer += $input->readString($key1203); + $xfer += $input->readString($val1204); + $this->part_vals[$key1203] = $val1204; } $xfer += $input->readMapEnd(); } else { @@ -33625,10 +33984,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1184 => $viter1185) + foreach ($this->part_vals as $kiter1205 => $viter1206) { - $xfer += $output->writeString($kiter1184); - $xfer += $output->writeString($viter1185); + $xfer += $output->writeString($kiter1205); + $xfer += $output->writeString($viter1206); } } $output->writeMapEnd(); @@ -35102,15 +35461,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1186 = 0; - $_etype1189 = 0; - $xfer += $input->readListBegin($_etype1189, $_size1186); - for ($_i1190 = 0; $_i1190 < $_size1186; ++$_i1190) + $_size1207 = 0; + $_etype1210 = 0; + $xfer += $input->readListBegin($_etype1210, $_size1207); + for ($_i1211 = 0; $_i1211 < $_size1207; ++$_i1211) { - $elem1191 = null; - $elem1191 = new \metastore\Index(); - $xfer += $elem1191->read($input); - $this->success []= $elem1191; + $elem1212 = null; + $elem1212 = new \metastore\Index(); + $xfer += $elem1212->read($input); + $this->success []= $elem1212; } $xfer += $input->readListEnd(); } else { @@ -35154,9 +35513,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - 
foreach ($this->success as $iter1192) + foreach ($this->success as $iter1213) { - $xfer += $iter1192->write($output); + $xfer += $iter1213->write($output); } } $output->writeListEnd(); @@ -35363,14 +35722,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1193 = 0; - $_etype1196 = 0; - $xfer += $input->readListBegin($_etype1196, $_size1193); - for ($_i1197 = 0; $_i1197 < $_size1193; ++$_i1197) + $_size1214 = 0; + $_etype1217 = 0; + $xfer += $input->readListBegin($_etype1217, $_size1214); + for ($_i1218 = 0; $_i1218 < $_size1214; ++$_i1218) { - $elem1198 = null; - $xfer += $input->readString($elem1198); - $this->success []= $elem1198; + $elem1219 = null; + $xfer += $input->readString($elem1219); + $this->success []= $elem1219; } $xfer += $input->readListEnd(); } else { @@ -35406,9 +35765,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1199) + foreach ($this->success as $iter1220) { - $xfer += $output->writeString($iter1199); + $xfer += $output->writeString($iter1220); } } $output->writeListEnd(); @@ -36267,6 +36626,216 @@ class ThriftHiveMetastore_get_not_null_constraints_result { } +class ThriftHiveMetastore_get_default_constraints_args { + static $_TSPEC; + + /** + * @var \metastore\DefaultConstraintsRequest + */ + public $request = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\DefaultConstraintsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['request'])) { + $this->request = $vals['request']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_default_constraints_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\DefaultConstraintsRequest(); + $xfer += $this->request->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_default_constraints_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_default_constraints_result { + static $_TSPEC; + + /** + * @var \metastore\DefaultConstraintsResponse + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => 
'\metastore\DefaultConstraintsResponse', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_default_constraints_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\DefaultConstraintsResponse(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\NoSuchObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_default_constraints_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_update_table_column_statistics_args { static $_TSPEC; @@ -39722,14 +40291,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1200 = 0; - $_etype1203 = 0; - $xfer += $input->readListBegin($_etype1203, $_size1200); - for ($_i1204 = 0; $_i1204 < $_size1200; ++$_i1204) + $_size1221 = 0; + $_etype1224 = 0; + $xfer += $input->readListBegin($_etype1224, $_size1221); + for ($_i1225 = 0; $_i1225 < $_size1221; ++$_i1225) { - $elem1205 = null; - $xfer += $input->readString($elem1205); - $this->success []= $elem1205; + $elem1226 = null; + $xfer += $input->readString($elem1226); + $this->success []= $elem1226; } $xfer += $input->readListEnd(); } else { @@ -39765,9 +40334,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1206) + foreach ($this->success as $iter1227) { - $xfer += $output->writeString($iter1206); + $xfer += $output->writeString($iter1227); 
} } $output->writeListEnd(); @@ -40636,14 +41205,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1207 = 0; - $_etype1210 = 0; - $xfer += $input->readListBegin($_etype1210, $_size1207); - for ($_i1211 = 0; $_i1211 < $_size1207; ++$_i1211) + $_size1228 = 0; + $_etype1231 = 0; + $xfer += $input->readListBegin($_etype1231, $_size1228); + for ($_i1232 = 0; $_i1232 < $_size1228; ++$_i1232) { - $elem1212 = null; - $xfer += $input->readString($elem1212); - $this->success []= $elem1212; + $elem1233 = null; + $xfer += $input->readString($elem1233); + $this->success []= $elem1233; } $xfer += $input->readListEnd(); } else { @@ -40679,9 +41248,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1213) + foreach ($this->success as $iter1234) { - $xfer += $output->writeString($iter1213); + $xfer += $output->writeString($iter1234); } } $output->writeListEnd(); @@ -41372,15 +41941,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1214 = 0; - $_etype1217 = 0; - $xfer += $input->readListBegin($_etype1217, $_size1214); - for ($_i1218 = 0; $_i1218 < $_size1214; ++$_i1218) + $_size1235 = 0; + $_etype1238 = 0; + $xfer += $input->readListBegin($_etype1238, $_size1235); + for ($_i1239 = 0; $_i1239 < $_size1235; ++$_i1239) { - $elem1219 = null; - $elem1219 = new \metastore\Role(); - $xfer += $elem1219->read($input); - $this->success []= $elem1219; + $elem1240 = null; + $elem1240 = new \metastore\Role(); + $xfer += $elem1240->read($input); + $this->success []= $elem1240; } $xfer += $input->readListEnd(); } else { @@ -41416,9 +41985,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1220) + foreach ($this->success as $iter1241) { - $xfer += $iter1220->write($output); + $xfer += $iter1241->write($output); } } $output->writeListEnd(); @@ -42080,14 +42649,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1221 = 0; - $_etype1224 = 0; - $xfer += $input->readListBegin($_etype1224, $_size1221); - for ($_i1225 = 0; $_i1225 < $_size1221; ++$_i1225) + $_size1242 = 0; + $_etype1245 = 0; + $xfer += $input->readListBegin($_etype1245, $_size1242); + for ($_i1246 = 0; $_i1246 < $_size1242; ++$_i1246) { - $elem1226 = null; - $xfer += $input->readString($elem1226); - $this->group_names []= $elem1226; + $elem1247 = null; + $xfer += $input->readString($elem1247); + $this->group_names []= $elem1247; } $xfer += $input->readListEnd(); } else { @@ -42128,9 +42697,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1227) + foreach ($this->group_names as $iter1248) { - $xfer += $output->writeString($iter1227); + $xfer += $output->writeString($iter1248); } } $output->writeListEnd(); @@ -42438,15 +43007,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1228 = 0; - $_etype1231 = 0; - $xfer += $input->readListBegin($_etype1231, $_size1228); - for ($_i1232 = 0; $_i1232 < $_size1228; ++$_i1232) + $_size1249 = 0; + $_etype1252 = 0; + $xfer += $input->readListBegin($_etype1252, $_size1249); + for ($_i1253 = 0; $_i1253 < $_size1249; 
++$_i1253) { - $elem1233 = null; - $elem1233 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1233->read($input); - $this->success []= $elem1233; + $elem1254 = null; + $elem1254 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1254->read($input); + $this->success []= $elem1254; } $xfer += $input->readListEnd(); } else { @@ -42482,9 +43051,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1234) + foreach ($this->success as $iter1255) { - $xfer += $iter1234->write($output); + $xfer += $iter1255->write($output); } } $output->writeListEnd(); @@ -43116,14 +43685,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1235 = 0; - $_etype1238 = 0; - $xfer += $input->readListBegin($_etype1238, $_size1235); - for ($_i1239 = 0; $_i1239 < $_size1235; ++$_i1239) + $_size1256 = 0; + $_etype1259 = 0; + $xfer += $input->readListBegin($_etype1259, $_size1256); + for ($_i1260 = 0; $_i1260 < $_size1256; ++$_i1260) { - $elem1240 = null; - $xfer += $input->readString($elem1240); - $this->group_names []= $elem1240; + $elem1261 = null; + $xfer += $input->readString($elem1261); + $this->group_names []= $elem1261; } $xfer += $input->readListEnd(); } else { @@ -43156,9 +43725,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1241) + foreach ($this->group_names as $iter1262) { - $xfer += $output->writeString($iter1241); + $xfer += $output->writeString($iter1262); } } $output->writeListEnd(); @@ -43234,14 +43803,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1242 = 0; - $_etype1245 = 0; - $xfer += $input->readListBegin($_etype1245, $_size1242); - for ($_i1246 = 0; $_i1246 < $_size1242; ++$_i1246) + $_size1263 = 0; + $_etype1266 = 0; + $xfer += $input->readListBegin($_etype1266, $_size1263); + for ($_i1267 = 0; $_i1267 < $_size1263; ++$_i1267) { - $elem1247 = null; - $xfer += $input->readString($elem1247); - $this->success []= $elem1247; + $elem1268 = null; + $xfer += $input->readString($elem1268); + $this->success []= $elem1268; } $xfer += $input->readListEnd(); } else { @@ -43277,9 +43846,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1248) + foreach ($this->success as $iter1269) { - $xfer += $output->writeString($iter1248); + $xfer += $output->writeString($iter1269); } } $output->writeListEnd(); @@ -44396,14 +44965,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1249 = 0; - $_etype1252 = 0; - $xfer += $input->readListBegin($_etype1252, $_size1249); - for ($_i1253 = 0; $_i1253 < $_size1249; ++$_i1253) + $_size1270 = 0; + $_etype1273 = 0; + $xfer += $input->readListBegin($_etype1273, $_size1270); + for ($_i1274 = 0; $_i1274 < $_size1270; ++$_i1274) { - $elem1254 = null; - $xfer += $input->readString($elem1254); - $this->success []= $elem1254; + $elem1275 = null; + $xfer += $input->readString($elem1275); + $this->success []= $elem1275; } $xfer += $input->readListEnd(); } else { @@ -44431,9 +45000,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1255) + 
foreach ($this->success as $iter1276) { - $xfer += $output->writeString($iter1255); + $xfer += $output->writeString($iter1276); } } $output->writeListEnd(); @@ -45072,14 +45641,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1256 = 0; - $_etype1259 = 0; - $xfer += $input->readListBegin($_etype1259, $_size1256); - for ($_i1260 = 0; $_i1260 < $_size1256; ++$_i1260) + $_size1277 = 0; + $_etype1280 = 0; + $xfer += $input->readListBegin($_etype1280, $_size1277); + for ($_i1281 = 0; $_i1281 < $_size1277; ++$_i1281) { - $elem1261 = null; - $xfer += $input->readString($elem1261); - $this->success []= $elem1261; + $elem1282 = null; + $xfer += $input->readString($elem1282); + $this->success []= $elem1282; } $xfer += $input->readListEnd(); } else { @@ -45107,9 +45676,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1262) + foreach ($this->success as $iter1283) { - $xfer += $output->writeString($iter1262); + $xfer += $output->writeString($iter1283); } } $output->writeListEnd(); diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index a6047bf7b3..fcdf2351b3 100644 --- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -1474,6 +1474,242 @@ class SQLNotNullConstraint { } +class SQLDefaultConstraint { + static $_TSPEC; + + /** + * @var string + */ + public $table_db = null; + /** + * @var string + */ + public $table_name = null; + /** + * @var string + */ + public $column_name = null; + /** + * @var string + */ + public $default_value = null; + /** + * @var string + */ + public $dc_name = null; + /** + * @var bool + */ + public $enable_cstr = null; + /** + * @var bool + */ + public $validate_cstr = null; + /** + * @var bool + */ + public $rely_cstr = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'table_db', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'table_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'column_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'default_value', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'dc_name', + 'type' => TType::STRING, + ), + 6 => array( + 'var' => 'enable_cstr', + 'type' => TType::BOOL, + ), + 7 => array( + 'var' => 'validate_cstr', + 'type' => TType::BOOL, + ), + 8 => array( + 'var' => 'rely_cstr', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['table_db'])) { + $this->table_db = $vals['table_db']; + } + if (isset($vals['table_name'])) { + $this->table_name = $vals['table_name']; + } + if (isset($vals['column_name'])) { + $this->column_name = $vals['column_name']; + } + if (isset($vals['default_value'])) { + $this->default_value = $vals['default_value']; + } + if (isset($vals['dc_name'])) { + $this->dc_name = $vals['dc_name']; + } + if (isset($vals['enable_cstr'])) { + $this->enable_cstr = $vals['enable_cstr']; + } + if (isset($vals['validate_cstr'])) { + $this->validate_cstr = $vals['validate_cstr']; + } + if (isset($vals['rely_cstr'])) { + $this->rely_cstr = $vals['rely_cstr']; + } + } + } + + public function getName() { + return 'SQLDefaultConstraint'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + 
$ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->table_db); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->table_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->column_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->default_value); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dc_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->enable_cstr); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->validate_cstr); + } else { + $xfer += $input->skip($ftype); + } + break; + case 8: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->rely_cstr); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SQLDefaultConstraint'); + if ($this->table_db !== null) { + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeString($this->table_db); + $xfer += $output->writeFieldEnd(); + } + if ($this->table_name !== null) { + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeString($this->table_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->column_name !== null) { + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeString($this->column_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->default_value !== null) { + $xfer += $output->writeFieldBegin('default_value', TType::STRING, 4); + $xfer += $output->writeString($this->default_value); + $xfer += $output->writeFieldEnd(); + } + if ($this->dc_name !== null) { + $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 5); + $xfer += $output->writeString($this->dc_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->enable_cstr !== null) { + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); + $xfer += $output->writeBool($this->enable_cstr); + $xfer += $output->writeFieldEnd(); + } + if ($this->validate_cstr !== null) { + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); + $xfer += $output->writeBool($this->validate_cstr); + $xfer += $output->writeFieldEnd(); + } + if ($this->rely_cstr !== null) { + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); + $xfer += $output->writeBool($this->rely_cstr); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class Type { static $_TSPEC; @@ -10124,54 +10360,43 @@ class NotNullConstraintsResponse { } -class DropConstraintRequest { +class DefaultConstraintsRequest { static $_TSPEC; /** * @var string */ - public $dbname 
= null; - /** - * @var string - */ - public $tablename = null; + public $db_name = null; /** * @var string */ - public $constraintname = null; + public $tbl_name = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbname', + 'var' => 'db_name', 'type' => TType::STRING, ), 2 => array( - 'var' => 'tablename', - 'type' => TType::STRING, - ), - 3 => array( - 'var' => 'constraintname', + 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { - if (isset($vals['dbname'])) { - $this->dbname = $vals['dbname']; - } - if (isset($vals['tablename'])) { - $this->tablename = $vals['tablename']; + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; } - if (isset($vals['constraintname'])) { - $this->constraintname = $vals['constraintname']; + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; } } } public function getName() { - return 'DropConstraintRequest'; + return 'DefaultConstraintsRequest'; } public function read($input) @@ -10191,24 +10416,236 @@ class DropConstraintRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbname); + $xfer += $input->readString($this->db_name); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tablename); + $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); } break; - case 3: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->constraintname); - } else { - $xfer += $input->skip($ftype); - } + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('DefaultConstraintsRequest'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class DefaultConstraintsResponse { + static $_TSPEC; + + /** + * @var \metastore\SQLDefaultConstraint[] + */ + public $defaultConstraints = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'defaultConstraints', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\SQLDefaultConstraint', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['defaultConstraints'])) { + $this->defaultConstraints = $vals['defaultConstraints']; + } + } + } + + public function getName() { + return 'DefaultConstraintsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->defaultConstraints = array(); + $_size313 = 0; + $_etype316 = 0; + $xfer += $input->readListBegin($_etype316, $_size313); + for ($_i317 = 0; $_i317 < $_size313; ++$_i317) + 
{ + $elem318 = null; + $elem318 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem318->read($input); + $this->defaultConstraints []= $elem318; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('DefaultConstraintsResponse'); + if ($this->defaultConstraints !== null) { + if (!is_array($this->defaultConstraints)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('defaultConstraints', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); + { + foreach ($this->defaultConstraints as $iter319) + { + $xfer += $iter319->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class DropConstraintRequest { + static $_TSPEC; + + /** + * @var string + */ + public $dbname = null; + /** + * @var string + */ + public $tablename = null; + /** + * @var string + */ + public $constraintname = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbname', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tablename', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'constraintname', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbname'])) { + $this->dbname = $vals['dbname']; + } + if (isset($vals['tablename'])) { + $this->tablename = $vals['tablename']; + } + if (isset($vals['constraintname'])) { + $this->constraintname = $vals['constraintname']; + } + } + } + + public function getName() { + return 'DropConstraintRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tablename); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->constraintname); + } else { + $xfer += $input->skip($ftype); + } break; default: $xfer += $input->skip($ftype); @@ -10296,15 +10733,15 @@ class AddPrimaryKeyRequest { case 1: if ($ftype == TType::LST) { $this->primaryKeyCols = array(); - $_size313 = 0; - $_etype316 = 0; - $xfer += $input->readListBegin($_etype316, $_size313); - for ($_i317 = 0; $_i317 < $_size313; ++$_i317) + $_size320 = 0; + $_etype323 = 0; + $xfer += $input->readListBegin($_etype323, $_size320); + for ($_i324 = 0; $_i324 < $_size320; ++$_i324) { - $elem318 = null; - $elem318 = new \metastore\SQLPrimaryKey(); - $xfer += $elem318->read($input); - $this->primaryKeyCols []= $elem318; + $elem325 = null; + $elem325 = new \metastore\SQLPrimaryKey(); + $xfer += $elem325->read($input); + $this->primaryKeyCols []= $elem325; } $xfer += $input->readListEnd(); } else { @@ -10332,9 +10769,9 @@ class 
AddPrimaryKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeyCols)); { - foreach ($this->primaryKeyCols as $iter319) + foreach ($this->primaryKeyCols as $iter326) { - $xfer += $iter319->write($output); + $xfer += $iter326->write($output); } } $output->writeListEnd(); @@ -10399,15 +10836,15 @@ class AddForeignKeyRequest { case 1: if ($ftype == TType::LST) { $this->foreignKeyCols = array(); - $_size320 = 0; - $_etype323 = 0; - $xfer += $input->readListBegin($_etype323, $_size320); - for ($_i324 = 0; $_i324 < $_size320; ++$_i324) + $_size327 = 0; + $_etype330 = 0; + $xfer += $input->readListBegin($_etype330, $_size327); + for ($_i331 = 0; $_i331 < $_size327; ++$_i331) { - $elem325 = null; - $elem325 = new \metastore\SQLForeignKey(); - $xfer += $elem325->read($input); - $this->foreignKeyCols []= $elem325; + $elem332 = null; + $elem332 = new \metastore\SQLForeignKey(); + $xfer += $elem332->read($input); + $this->foreignKeyCols []= $elem332; } $xfer += $input->readListEnd(); } else { @@ -10435,9 +10872,9 @@ class AddForeignKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeyCols)); { - foreach ($this->foreignKeyCols as $iter326) + foreach ($this->foreignKeyCols as $iter333) { - $xfer += $iter326->write($output); + $xfer += $iter333->write($output); } } $output->writeListEnd(); @@ -10502,15 +10939,15 @@ class AddUniqueConstraintRequest { case 1: if ($ftype == TType::LST) { $this->uniqueConstraintCols = array(); - $_size327 = 0; - $_etype330 = 0; - $xfer += $input->readListBegin($_etype330, $_size327); - for ($_i331 = 0; $_i331 < $_size327; ++$_i331) + $_size334 = 0; + $_etype337 = 0; + $xfer += $input->readListBegin($_etype337, $_size334); + for ($_i338 = 0; $_i338 < $_size334; ++$_i338) { - $elem332 = null; - $elem332 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem332->read($input); - $this->uniqueConstraintCols []= $elem332; + $elem339 = null; + $elem339 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem339->read($input); + $this->uniqueConstraintCols []= $elem339; } $xfer += $input->readListEnd(); } else { @@ -10538,9 +10975,9 @@ class AddUniqueConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraintCols)); { - foreach ($this->uniqueConstraintCols as $iter333) + foreach ($this->uniqueConstraintCols as $iter340) { - $xfer += $iter333->write($output); + $xfer += $iter340->write($output); } } $output->writeListEnd(); @@ -10605,15 +11042,15 @@ class AddNotNullConstraintRequest { case 1: if ($ftype == TType::LST) { $this->notNullConstraintCols = array(); - $_size334 = 0; - $_etype337 = 0; - $xfer += $input->readListBegin($_etype337, $_size334); - for ($_i338 = 0; $_i338 < $_size334; ++$_i338) + $_size341 = 0; + $_etype344 = 0; + $xfer += $input->readListBegin($_etype344, $_size341); + for ($_i345 = 0; $_i345 < $_size341; ++$_i345) { - $elem339 = null; - $elem339 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem339->read($input); - $this->notNullConstraintCols []= $elem339; + $elem346 = null; + $elem346 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem346->read($input); + $this->notNullConstraintCols []= $elem346; } $xfer += $input->readListEnd(); } else { @@ -10641,9 +11078,112 @@ class AddNotNullConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraintCols)); { - foreach ($this->notNullConstraintCols as $iter340) + foreach ($this->notNullConstraintCols as $iter347) { - $xfer += $iter340->write($output); + $xfer += $iter347->write($output); + } + } + 
$output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class AddDefaultConstraintRequest { + static $_TSPEC; + + /** + * @var \metastore\SQLDefaultConstraint[] + */ + public $defaultConstraintCols = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'defaultConstraintCols', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\SQLDefaultConstraint', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['defaultConstraintCols'])) { + $this->defaultConstraintCols = $vals['defaultConstraintCols']; + } + } + } + + public function getName() { + return 'AddDefaultConstraintRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->defaultConstraintCols = array(); + $_size348 = 0; + $_etype351 = 0; + $xfer += $input->readListBegin($_etype351, $_size348); + for ($_i352 = 0; $_i352 < $_size348; ++$_i352) + { + $elem353 = null; + $elem353 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem353->read($input); + $this->defaultConstraintCols []= $elem353; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('AddDefaultConstraintRequest'); + if ($this->defaultConstraintCols !== null) { + if (!is_array($this->defaultConstraintCols)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('defaultConstraintCols', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->defaultConstraintCols)); + { + foreach ($this->defaultConstraintCols as $iter354) + { + $xfer += $iter354->write($output); } } $output->writeListEnd(); @@ -10719,15 +11259,15 @@ class PartitionsByExprResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size341 = 0; - $_etype344 = 0; - $xfer += $input->readListBegin($_etype344, $_size341); - for ($_i345 = 0; $_i345 < $_size341; ++$_i345) + $_size355 = 0; + $_etype358 = 0; + $xfer += $input->readListBegin($_etype358, $_size355); + for ($_i359 = 0; $_i359 < $_size355; ++$_i359) { - $elem346 = null; - $elem346 = new \metastore\Partition(); - $xfer += $elem346->read($input); - $this->partitions []= $elem346; + $elem360 = null; + $elem360 = new \metastore\Partition(); + $xfer += $elem360->read($input); + $this->partitions []= $elem360; } $xfer += $input->readListEnd(); } else { @@ -10762,9 +11302,9 @@ class PartitionsByExprResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter347) + foreach ($this->partitions as $iter361) { - $xfer += $iter347->write($output); + $xfer += $iter361->write($output); } } $output->writeListEnd(); @@ -11001,15 +11541,15 @@ class TableStatsResult { case 1: if ($ftype == TType::LST) { $this->tableStats = array(); - $_size348 = 0; - $_etype351 = 0; - $xfer += 
$input->readListBegin($_etype351, $_size348); - for ($_i352 = 0; $_i352 < $_size348; ++$_i352) + $_size362 = 0; + $_etype365 = 0; + $xfer += $input->readListBegin($_etype365, $_size362); + for ($_i366 = 0; $_i366 < $_size362; ++$_i366) { - $elem353 = null; - $elem353 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem353->read($input); - $this->tableStats []= $elem353; + $elem367 = null; + $elem367 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem367->read($input); + $this->tableStats []= $elem367; } $xfer += $input->readListEnd(); } else { @@ -11037,9 +11577,9 @@ class TableStatsResult { { $output->writeListBegin(TType::STRUCT, count($this->tableStats)); { - foreach ($this->tableStats as $iter354) + foreach ($this->tableStats as $iter368) { - $xfer += $iter354->write($output); + $xfer += $iter368->write($output); } } $output->writeListEnd(); @@ -11112,28 +11652,28 @@ class PartitionsStatsResult { case 1: if ($ftype == TType::MAP) { $this->partStats = array(); - $_size355 = 0; - $_ktype356 = 0; - $_vtype357 = 0; - $xfer += $input->readMapBegin($_ktype356, $_vtype357, $_size355); - for ($_i359 = 0; $_i359 < $_size355; ++$_i359) + $_size369 = 0; + $_ktype370 = 0; + $_vtype371 = 0; + $xfer += $input->readMapBegin($_ktype370, $_vtype371, $_size369); + for ($_i373 = 0; $_i373 < $_size369; ++$_i373) { - $key360 = ''; - $val361 = array(); - $xfer += $input->readString($key360); - $val361 = array(); - $_size362 = 0; - $_etype365 = 0; - $xfer += $input->readListBegin($_etype365, $_size362); - for ($_i366 = 0; $_i366 < $_size362; ++$_i366) + $key374 = ''; + $val375 = array(); + $xfer += $input->readString($key374); + $val375 = array(); + $_size376 = 0; + $_etype379 = 0; + $xfer += $input->readListBegin($_etype379, $_size376); + for ($_i380 = 0; $_i380 < $_size376; ++$_i380) { - $elem367 = null; - $elem367 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem367->read($input); - $val361 []= $elem367; + $elem381 = null; + $elem381 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem381->read($input); + $val375 []= $elem381; } $xfer += $input->readListEnd(); - $this->partStats[$key360] = $val361; + $this->partStats[$key374] = $val375; } $xfer += $input->readMapEnd(); } else { @@ -11161,15 +11701,15 @@ class PartitionsStatsResult { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->partStats)); { - foreach ($this->partStats as $kiter368 => $viter369) + foreach ($this->partStats as $kiter382 => $viter383) { - $xfer += $output->writeString($kiter368); + $xfer += $output->writeString($kiter382); { - $output->writeListBegin(TType::STRUCT, count($viter369)); + $output->writeListBegin(TType::STRUCT, count($viter383)); { - foreach ($viter369 as $iter370) + foreach ($viter383 as $iter384) { - $xfer += $iter370->write($output); + $xfer += $iter384->write($output); } } $output->writeListEnd(); @@ -11273,14 +11813,14 @@ class TableStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size371 = 0; - $_etype374 = 0; - $xfer += $input->readListBegin($_etype374, $_size371); - for ($_i375 = 0; $_i375 < $_size371; ++$_i375) + $_size385 = 0; + $_etype388 = 0; + $xfer += $input->readListBegin($_etype388, $_size385); + for ($_i389 = 0; $_i389 < $_size385; ++$_i389) { - $elem376 = null; - $xfer += $input->readString($elem376); - $this->colNames []= $elem376; + $elem390 = null; + $xfer += $input->readString($elem390); + $this->colNames []= $elem390; } $xfer += $input->readListEnd(); } else { @@ -11318,9 +11858,9 @@ class TableStatsRequest { { 
$output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter377) + foreach ($this->colNames as $iter391) { - $xfer += $output->writeString($iter377); + $xfer += $output->writeString($iter391); } } $output->writeListEnd(); @@ -11435,14 +11975,14 @@ class PartitionsStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size378 = 0; - $_etype381 = 0; - $xfer += $input->readListBegin($_etype381, $_size378); - for ($_i382 = 0; $_i382 < $_size378; ++$_i382) + $_size392 = 0; + $_etype395 = 0; + $xfer += $input->readListBegin($_etype395, $_size392); + for ($_i396 = 0; $_i396 < $_size392; ++$_i396) { - $elem383 = null; - $xfer += $input->readString($elem383); - $this->colNames []= $elem383; + $elem397 = null; + $xfer += $input->readString($elem397); + $this->colNames []= $elem397; } $xfer += $input->readListEnd(); } else { @@ -11452,14 +11992,14 @@ class PartitionsStatsRequest { case 4: if ($ftype == TType::LST) { $this->partNames = array(); - $_size384 = 0; - $_etype387 = 0; - $xfer += $input->readListBegin($_etype387, $_size384); - for ($_i388 = 0; $_i388 < $_size384; ++$_i388) + $_size398 = 0; + $_etype401 = 0; + $xfer += $input->readListBegin($_etype401, $_size398); + for ($_i402 = 0; $_i402 < $_size398; ++$_i402) { - $elem389 = null; - $xfer += $input->readString($elem389); - $this->partNames []= $elem389; + $elem403 = null; + $xfer += $input->readString($elem403); + $this->partNames []= $elem403; } $xfer += $input->readListEnd(); } else { @@ -11497,9 +12037,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter390) + foreach ($this->colNames as $iter404) { - $xfer += $output->writeString($iter390); + $xfer += $output->writeString($iter404); } } $output->writeListEnd(); @@ -11514,9 +12054,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter391) + foreach ($this->partNames as $iter405) { - $xfer += $output->writeString($iter391); + $xfer += $output->writeString($iter405); } } $output->writeListEnd(); @@ -11581,15 +12121,15 @@ class AddPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size392 = 0; - $_etype395 = 0; - $xfer += $input->readListBegin($_etype395, $_size392); - for ($_i396 = 0; $_i396 < $_size392; ++$_i396) + $_size406 = 0; + $_etype409 = 0; + $xfer += $input->readListBegin($_etype409, $_size406); + for ($_i410 = 0; $_i410 < $_size406; ++$_i410) { - $elem397 = null; - $elem397 = new \metastore\Partition(); - $xfer += $elem397->read($input); - $this->partitions []= $elem397; + $elem411 = null; + $elem411 = new \metastore\Partition(); + $xfer += $elem411->read($input); + $this->partitions []= $elem411; } $xfer += $input->readListEnd(); } else { @@ -11617,9 +12157,9 @@ class AddPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter398) + foreach ($this->partitions as $iter412) { - $xfer += $iter398->write($output); + $xfer += $iter412->write($output); } } $output->writeListEnd(); @@ -11742,15 +12282,15 @@ class AddPartitionsRequest { case 3: if ($ftype == TType::LST) { $this->parts = array(); - $_size399 = 0; - $_etype402 = 0; - $xfer += $input->readListBegin($_etype402, $_size399); - for ($_i403 = 0; $_i403 < $_size399; ++$_i403) + $_size413 = 0; + $_etype416 = 0; + $xfer += $input->readListBegin($_etype416, $_size413); + for ($_i417 = 0; 
$_i417 < $_size413; ++$_i417) { - $elem404 = null; - $elem404 = new \metastore\Partition(); - $xfer += $elem404->read($input); - $this->parts []= $elem404; + $elem418 = null; + $elem418 = new \metastore\Partition(); + $xfer += $elem418->read($input); + $this->parts []= $elem418; } $xfer += $input->readListEnd(); } else { @@ -11802,9 +12342,9 @@ class AddPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->parts)); { - foreach ($this->parts as $iter405) + foreach ($this->parts as $iter419) { - $xfer += $iter405->write($output); + $xfer += $iter419->write($output); } } $output->writeListEnd(); @@ -11879,15 +12419,15 @@ class DropPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size406 = 0; - $_etype409 = 0; - $xfer += $input->readListBegin($_etype409, $_size406); - for ($_i410 = 0; $_i410 < $_size406; ++$_i410) + $_size420 = 0; + $_etype423 = 0; + $xfer += $input->readListBegin($_etype423, $_size420); + for ($_i424 = 0; $_i424 < $_size420; ++$_i424) { - $elem411 = null; - $elem411 = new \metastore\Partition(); - $xfer += $elem411->read($input); - $this->partitions []= $elem411; + $elem425 = null; + $elem425 = new \metastore\Partition(); + $xfer += $elem425->read($input); + $this->partitions []= $elem425; } $xfer += $input->readListEnd(); } else { @@ -11915,9 +12455,9 @@ class DropPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter412) + foreach ($this->partitions as $iter426) { - $xfer += $iter412->write($output); + $xfer += $iter426->write($output); } } $output->writeListEnd(); @@ -12095,14 +12635,14 @@ class RequestPartsSpec { case 1: if ($ftype == TType::LST) { $this->names = array(); - $_size413 = 0; - $_etype416 = 0; - $xfer += $input->readListBegin($_etype416, $_size413); - for ($_i417 = 0; $_i417 < $_size413; ++$_i417) + $_size427 = 0; + $_etype430 = 0; + $xfer += $input->readListBegin($_etype430, $_size427); + for ($_i431 = 0; $_i431 < $_size427; ++$_i431) { - $elem418 = null; - $xfer += $input->readString($elem418); - $this->names []= $elem418; + $elem432 = null; + $xfer += $input->readString($elem432); + $this->names []= $elem432; } $xfer += $input->readListEnd(); } else { @@ -12112,15 +12652,15 @@ class RequestPartsSpec { case 2: if ($ftype == TType::LST) { $this->exprs = array(); - $_size419 = 0; - $_etype422 = 0; - $xfer += $input->readListBegin($_etype422, $_size419); - for ($_i423 = 0; $_i423 < $_size419; ++$_i423) + $_size433 = 0; + $_etype436 = 0; + $xfer += $input->readListBegin($_etype436, $_size433); + for ($_i437 = 0; $_i437 < $_size433; ++$_i437) { - $elem424 = null; - $elem424 = new \metastore\DropPartitionsExpr(); - $xfer += $elem424->read($input); - $this->exprs []= $elem424; + $elem438 = null; + $elem438 = new \metastore\DropPartitionsExpr(); + $xfer += $elem438->read($input); + $this->exprs []= $elem438; } $xfer += $input->readListEnd(); } else { @@ -12148,9 +12688,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter425) + foreach ($this->names as $iter439) { - $xfer += $output->writeString($iter425); + $xfer += $output->writeString($iter439); } } $output->writeListEnd(); @@ -12165,9 +12705,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRUCT, count($this->exprs)); { - foreach ($this->exprs as $iter426) + foreach ($this->exprs as $iter440) { - $xfer += $iter426->write($output); + $xfer += $iter440->write($output); } } $output->writeListEnd(); 
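Nearly all of the PHP churn in the hunks above and below is mechanical. The Thrift compiler draws its deserialization temporaries ($_size, $_etype, $_ktype, $_vtype, $_i, $elem, $key, $val, $iter) from a single counter that runs through the entire generated file, so inserting AddDefaultConstraintRequest ahead of these structs shifts every later suffix by a constant 14 ($_size341 becomes $_size355, $iter347 becomes $iter361, and so on) while the serialization behavior stays identical. A minimal sketch of that naming scheme, using a hypothetical TempAllocator in place of the generator's internal counter:

# Hypothetical sketch of the generator's temp-naming scheme; TempAllocator
# and emit_list_read are illustrative names, not part of the Thrift compiler.
class TempAllocator:
    """Hands out numbered temporaries from one file-wide counter."""

    def __init__(self):
        self.counter = 0

    def fresh(self, prefix):
        name = "$%s%d" % (prefix, self.counter)
        self.counter += 1
        return name


def emit_list_read(alloc):
    # One list-typed field consumes a cluster of temporaries in a row,
    # which is why the suffixes in the hunks above jump in small groups.
    return [alloc.fresh(p) for p in ("_size", "_etype", "_i", "elem")]


before = TempAllocator()
after = TempAllocator()
emit_list_read(after)           # a newly inserted struct claims the low numbers...
print(emit_list_read(before))   # ['$_size0', '$_etype1', '$_i2', '$elem3']
print(emit_list_read(after))    # ...so the same old field now gets higher suffixes

Running it shows the same field claiming higher-numbered temporaries once another struct is emitted ahead of it, which is exactly the shape of the rename-only hunks in this file: in the spans shown here, the only semantic addition is the AddDefaultConstraintRequest struct itself.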
@@ -12574,15 +13114,15 @@ class PartitionValuesRequest { case 3: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size427 = 0; - $_etype430 = 0; - $xfer += $input->readListBegin($_etype430, $_size427); - for ($_i431 = 0; $_i431 < $_size427; ++$_i431) + $_size441 = 0; + $_etype444 = 0; + $xfer += $input->readListBegin($_etype444, $_size441); + for ($_i445 = 0; $_i445 < $_size441; ++$_i445) { - $elem432 = null; - $elem432 = new \metastore\FieldSchema(); - $xfer += $elem432->read($input); - $this->partitionKeys []= $elem432; + $elem446 = null; + $elem446 = new \metastore\FieldSchema(); + $xfer += $elem446->read($input); + $this->partitionKeys []= $elem446; } $xfer += $input->readListEnd(); } else { @@ -12606,15 +13146,15 @@ class PartitionValuesRequest { case 6: if ($ftype == TType::LST) { $this->partitionOrder = array(); - $_size433 = 0; - $_etype436 = 0; - $xfer += $input->readListBegin($_etype436, $_size433); - for ($_i437 = 0; $_i437 < $_size433; ++$_i437) + $_size447 = 0; + $_etype450 = 0; + $xfer += $input->readListBegin($_etype450, $_size447); + for ($_i451 = 0; $_i451 < $_size447; ++$_i451) { - $elem438 = null; - $elem438 = new \metastore\FieldSchema(); - $xfer += $elem438->read($input); - $this->partitionOrder []= $elem438; + $elem452 = null; + $elem452 = new \metastore\FieldSchema(); + $xfer += $elem452->read($input); + $this->partitionOrder []= $elem452; } $xfer += $input->readListEnd(); } else { @@ -12666,9 +13206,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter439) + foreach ($this->partitionKeys as $iter453) { - $xfer += $iter439->write($output); + $xfer += $iter453->write($output); } } $output->writeListEnd(); @@ -12693,9 +13233,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionOrder)); { - foreach ($this->partitionOrder as $iter440) + foreach ($this->partitionOrder as $iter454) { - $xfer += $iter440->write($output); + $xfer += $iter454->write($output); } } $output->writeListEnd(); @@ -12769,14 +13309,14 @@ class PartitionValuesRow { case 1: if ($ftype == TType::LST) { $this->row = array(); - $_size441 = 0; - $_etype444 = 0; - $xfer += $input->readListBegin($_etype444, $_size441); - for ($_i445 = 0; $_i445 < $_size441; ++$_i445) + $_size455 = 0; + $_etype458 = 0; + $xfer += $input->readListBegin($_etype458, $_size455); + for ($_i459 = 0; $_i459 < $_size455; ++$_i459) { - $elem446 = null; - $xfer += $input->readString($elem446); - $this->row []= $elem446; + $elem460 = null; + $xfer += $input->readString($elem460); + $this->row []= $elem460; } $xfer += $input->readListEnd(); } else { @@ -12804,9 +13344,9 @@ class PartitionValuesRow { { $output->writeListBegin(TType::STRING, count($this->row)); { - foreach ($this->row as $iter447) + foreach ($this->row as $iter461) { - $xfer += $output->writeString($iter447); + $xfer += $output->writeString($iter461); } } $output->writeListEnd(); @@ -12871,15 +13411,15 @@ class PartitionValuesResponse { case 1: if ($ftype == TType::LST) { $this->partitionValues = array(); - $_size448 = 0; - $_etype451 = 0; - $xfer += $input->readListBegin($_etype451, $_size448); - for ($_i452 = 0; $_i452 < $_size448; ++$_i452) + $_size462 = 0; + $_etype465 = 0; + $xfer += $input->readListBegin($_etype465, $_size462); + for ($_i466 = 0; $_i466 < $_size462; ++$_i466) { - $elem453 = null; - $elem453 = new \metastore\PartitionValuesRow(); - $xfer += $elem453->read($input); - $this->partitionValues []= 
$elem453; + $elem467 = null; + $elem467 = new \metastore\PartitionValuesRow(); + $xfer += $elem467->read($input); + $this->partitionValues []= $elem467; } $xfer += $input->readListEnd(); } else { @@ -12907,9 +13447,9 @@ class PartitionValuesResponse { { $output->writeListBegin(TType::STRUCT, count($this->partitionValues)); { - foreach ($this->partitionValues as $iter454) + foreach ($this->partitionValues as $iter468) { - $xfer += $iter454->write($output); + $xfer += $iter468->write($output); } } $output->writeListEnd(); @@ -13198,15 +13738,15 @@ class Function { case 8: if ($ftype == TType::LST) { $this->resourceUris = array(); - $_size455 = 0; - $_etype458 = 0; - $xfer += $input->readListBegin($_etype458, $_size455); - for ($_i459 = 0; $_i459 < $_size455; ++$_i459) + $_size469 = 0; + $_etype472 = 0; + $xfer += $input->readListBegin($_etype472, $_size469); + for ($_i473 = 0; $_i473 < $_size469; ++$_i473) { - $elem460 = null; - $elem460 = new \metastore\ResourceUri(); - $xfer += $elem460->read($input); - $this->resourceUris []= $elem460; + $elem474 = null; + $elem474 = new \metastore\ResourceUri(); + $xfer += $elem474->read($input); + $this->resourceUris []= $elem474; } $xfer += $input->readListEnd(); } else { @@ -13269,9 +13809,9 @@ class Function { { $output->writeListBegin(TType::STRUCT, count($this->resourceUris)); { - foreach ($this->resourceUris as $iter461) + foreach ($this->resourceUris as $iter475) { - $xfer += $iter461->write($output); + $xfer += $iter475->write($output); } } $output->writeListEnd(); @@ -13613,15 +14153,15 @@ class GetOpenTxnsInfoResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size462 = 0; - $_etype465 = 0; - $xfer += $input->readListBegin($_etype465, $_size462); - for ($_i466 = 0; $_i466 < $_size462; ++$_i466) + $_size476 = 0; + $_etype479 = 0; + $xfer += $input->readListBegin($_etype479, $_size476); + for ($_i480 = 0; $_i480 < $_size476; ++$_i480) { - $elem467 = null; - $elem467 = new \metastore\TxnInfo(); - $xfer += $elem467->read($input); - $this->open_txns []= $elem467; + $elem481 = null; + $elem481 = new \metastore\TxnInfo(); + $xfer += $elem481->read($input); + $this->open_txns []= $elem481; } $xfer += $input->readListEnd(); } else { @@ -13654,9 +14194,9 @@ class GetOpenTxnsInfoResponse { { $output->writeListBegin(TType::STRUCT, count($this->open_txns)); { - foreach ($this->open_txns as $iter468) + foreach ($this->open_txns as $iter482) { - $xfer += $iter468->write($output); + $xfer += $iter482->write($output); } } $output->writeListEnd(); @@ -13760,14 +14300,14 @@ class GetOpenTxnsResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size469 = 0; - $_etype472 = 0; - $xfer += $input->readListBegin($_etype472, $_size469); - for ($_i473 = 0; $_i473 < $_size469; ++$_i473) + $_size483 = 0; + $_etype486 = 0; + $xfer += $input->readListBegin($_etype486, $_size483); + for ($_i487 = 0; $_i487 < $_size483; ++$_i487) { - $elem474 = null; - $xfer += $input->readI64($elem474); - $this->open_txns []= $elem474; + $elem488 = null; + $xfer += $input->readI64($elem488); + $this->open_txns []= $elem488; } $xfer += $input->readListEnd(); } else { @@ -13814,9 +14354,9 @@ class GetOpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter475) + foreach ($this->open_txns as $iter489) { - $xfer += $output->writeI64($iter475); + $xfer += $output->writeI64($iter489); } } $output->writeListEnd(); @@ -14034,14 +14574,14 @@ class OpenTxnsResponse { case 1: if ($ftype 
== TType::LST) { $this->txn_ids = array(); - $_size476 = 0; - $_etype479 = 0; - $xfer += $input->readListBegin($_etype479, $_size476); - for ($_i480 = 0; $_i480 < $_size476; ++$_i480) + $_size490 = 0; + $_etype493 = 0; + $xfer += $input->readListBegin($_etype493, $_size490); + for ($_i494 = 0; $_i494 < $_size490; ++$_i494) { - $elem481 = null; - $xfer += $input->readI64($elem481); - $this->txn_ids []= $elem481; + $elem495 = null; + $xfer += $input->readI64($elem495); + $this->txn_ids []= $elem495; } $xfer += $input->readListEnd(); } else { @@ -14069,9 +14609,9 @@ class OpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter482) + foreach ($this->txn_ids as $iter496) { - $xfer += $output->writeI64($iter482); + $xfer += $output->writeI64($iter496); } } $output->writeListEnd(); @@ -14210,14 +14750,14 @@ class AbortTxnsRequest { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size483 = 0; - $_etype486 = 0; - $xfer += $input->readListBegin($_etype486, $_size483); - for ($_i487 = 0; $_i487 < $_size483; ++$_i487) + $_size497 = 0; + $_etype500 = 0; + $xfer += $input->readListBegin($_etype500, $_size497); + for ($_i501 = 0; $_i501 < $_size497; ++$_i501) { - $elem488 = null; - $xfer += $input->readI64($elem488); - $this->txn_ids []= $elem488; + $elem502 = null; + $xfer += $input->readI64($elem502); + $this->txn_ids []= $elem502; } $xfer += $input->readListEnd(); } else { @@ -14245,9 +14785,9 @@ class AbortTxnsRequest { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter489) + foreach ($this->txn_ids as $iter503) { - $xfer += $output->writeI64($iter489); + $xfer += $output->writeI64($iter503); } } $output->writeListEnd(); @@ -14397,14 +14937,14 @@ class GetValidWriteIdsRequest { case 1: if ($ftype == TType::LST) { $this->fullTableNames = array(); - $_size490 = 0; - $_etype493 = 0; - $xfer += $input->readListBegin($_etype493, $_size490); - for ($_i494 = 0; $_i494 < $_size490; ++$_i494) + $_size504 = 0; + $_etype507 = 0; + $xfer += $input->readListBegin($_etype507, $_size504); + for ($_i508 = 0; $_i508 < $_size504; ++$_i508) { - $elem495 = null; - $xfer += $input->readString($elem495); - $this->fullTableNames []= $elem495; + $elem509 = null; + $xfer += $input->readString($elem509); + $this->fullTableNames []= $elem509; } $xfer += $input->readListEnd(); } else { @@ -14439,9 +14979,9 @@ class GetValidWriteIdsRequest { { $output->writeListBegin(TType::STRING, count($this->fullTableNames)); { - foreach ($this->fullTableNames as $iter496) + foreach ($this->fullTableNames as $iter510) { - $xfer += $output->writeString($iter496); + $xfer += $output->writeString($iter510); } } $output->writeListEnd(); @@ -14568,14 +15108,14 @@ class TableValidWriteIds { case 3: if ($ftype == TType::LST) { $this->invalidWriteIds = array(); - $_size497 = 0; - $_etype500 = 0; - $xfer += $input->readListBegin($_etype500, $_size497); - for ($_i501 = 0; $_i501 < $_size497; ++$_i501) + $_size511 = 0; + $_etype514 = 0; + $xfer += $input->readListBegin($_etype514, $_size511); + for ($_i515 = 0; $_i515 < $_size511; ++$_i515) { - $elem502 = null; - $xfer += $input->readI64($elem502); - $this->invalidWriteIds []= $elem502; + $elem516 = null; + $xfer += $input->readI64($elem516); + $this->invalidWriteIds []= $elem516; } $xfer += $input->readListEnd(); } else { @@ -14627,9 +15167,9 @@ class TableValidWriteIds { { $output->writeListBegin(TType::I64, count($this->invalidWriteIds)); { - foreach ($this->invalidWriteIds 
as $iter503) + foreach ($this->invalidWriteIds as $iter517) { - $xfer += $output->writeI64($iter503); + $xfer += $output->writeI64($iter517); } } $output->writeListEnd(); @@ -14704,15 +15244,15 @@ class GetValidWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->tblValidWriteIds = array(); - $_size504 = 0; - $_etype507 = 0; - $xfer += $input->readListBegin($_etype507, $_size504); - for ($_i508 = 0; $_i508 < $_size504; ++$_i508) + $_size518 = 0; + $_etype521 = 0; + $xfer += $input->readListBegin($_etype521, $_size518); + for ($_i522 = 0; $_i522 < $_size518; ++$_i522) { - $elem509 = null; - $elem509 = new \metastore\TableValidWriteIds(); - $xfer += $elem509->read($input); - $this->tblValidWriteIds []= $elem509; + $elem523 = null; + $elem523 = new \metastore\TableValidWriteIds(); + $xfer += $elem523->read($input); + $this->tblValidWriteIds []= $elem523; } $xfer += $input->readListEnd(); } else { @@ -14740,9 +15280,9 @@ class GetValidWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->tblValidWriteIds)); { - foreach ($this->tblValidWriteIds as $iter510) + foreach ($this->tblValidWriteIds as $iter524) { - $xfer += $iter510->write($output); + $xfer += $iter524->write($output); } } $output->writeListEnd(); @@ -14828,14 +15368,14 @@ class AllocateTableWriteIdsRequest { case 1: if ($ftype == TType::LST) { $this->txnIds = array(); - $_size511 = 0; - $_etype514 = 0; - $xfer += $input->readListBegin($_etype514, $_size511); - for ($_i515 = 0; $_i515 < $_size511; ++$_i515) + $_size525 = 0; + $_etype528 = 0; + $xfer += $input->readListBegin($_etype528, $_size525); + for ($_i529 = 0; $_i529 < $_size525; ++$_i529) { - $elem516 = null; - $xfer += $input->readI64($elem516); - $this->txnIds []= $elem516; + $elem530 = null; + $xfer += $input->readI64($elem530); + $this->txnIds []= $elem530; } $xfer += $input->readListEnd(); } else { @@ -14877,9 +15417,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::I64, count($this->txnIds)); { - foreach ($this->txnIds as $iter517) + foreach ($this->txnIds as $iter531) { - $xfer += $output->writeI64($iter517); + $xfer += $output->writeI64($iter531); } } $output->writeListEnd(); @@ -15052,15 +15592,15 @@ class AllocateTableWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->txnToWriteIds = array(); - $_size518 = 0; - $_etype521 = 0; - $xfer += $input->readListBegin($_etype521, $_size518); - for ($_i522 = 0; $_i522 < $_size518; ++$_i522) + $_size532 = 0; + $_etype535 = 0; + $xfer += $input->readListBegin($_etype535, $_size532); + for ($_i536 = 0; $_i536 < $_size532; ++$_i536) { - $elem523 = null; - $elem523 = new \metastore\TxnToWriteId(); - $xfer += $elem523->read($input); - $this->txnToWriteIds []= $elem523; + $elem537 = null; + $elem537 = new \metastore\TxnToWriteId(); + $xfer += $elem537->read($input); + $this->txnToWriteIds []= $elem537; } $xfer += $input->readListEnd(); } else { @@ -15088,9 +15628,9 @@ class AllocateTableWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); { - foreach ($this->txnToWriteIds as $iter524) + foreach ($this->txnToWriteIds as $iter538) { - $xfer += $iter524->write($output); + $xfer += $iter538->write($output); } } $output->writeListEnd(); @@ -15435,15 +15975,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size525 = 0; - $_etype528 = 0; - $xfer += $input->readListBegin($_etype528, $_size525); - for ($_i529 = 0; $_i529 < $_size525; ++$_i529) + $_size539 = 0; + $_etype542 = 0; + $xfer += 
$input->readListBegin($_etype542, $_size539); + for ($_i543 = 0; $_i543 < $_size539; ++$_i543) { - $elem530 = null; - $elem530 = new \metastore\LockComponent(); - $xfer += $elem530->read($input); - $this->component []= $elem530; + $elem544 = null; + $elem544 = new \metastore\LockComponent(); + $xfer += $elem544->read($input); + $this->component []= $elem544; } $xfer += $input->readListEnd(); } else { @@ -15499,9 +16039,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter531) + foreach ($this->component as $iter545) { - $xfer += $iter531->write($output); + $xfer += $iter545->write($output); } } $output->writeListEnd(); @@ -16444,15 +16984,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size532 = 0; - $_etype535 = 0; - $xfer += $input->readListBegin($_etype535, $_size532); - for ($_i536 = 0; $_i536 < $_size532; ++$_i536) + $_size546 = 0; + $_etype549 = 0; + $xfer += $input->readListBegin($_etype549, $_size546); + for ($_i550 = 0; $_i550 < $_size546; ++$_i550) { - $elem537 = null; - $elem537 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem537->read($input); - $this->locks []= $elem537; + $elem551 = null; + $elem551 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem551->read($input); + $this->locks []= $elem551; } $xfer += $input->readListEnd(); } else { @@ -16480,9 +17020,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter538) + foreach ($this->locks as $iter552) { - $xfer += $iter538->write($output); + $xfer += $iter552->write($output); } } $output->writeListEnd(); @@ -16757,17 +17297,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size539 = 0; - $_etype542 = 0; - $xfer += $input->readSetBegin($_etype542, $_size539); - for ($_i543 = 0; $_i543 < $_size539; ++$_i543) + $_size553 = 0; + $_etype556 = 0; + $xfer += $input->readSetBegin($_etype556, $_size553); + for ($_i557 = 0; $_i557 < $_size553; ++$_i557) { - $elem544 = null; - $xfer += $input->readI64($elem544); - if (is_scalar($elem544)) { - $this->aborted[$elem544] = true; + $elem558 = null; + $xfer += $input->readI64($elem558); + if (is_scalar($elem558)) { + $this->aborted[$elem558] = true; } else { - $this->aborted []= $elem544; + $this->aborted []= $elem558; } } $xfer += $input->readSetEnd(); @@ -16778,17 +17318,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size545 = 0; - $_etype548 = 0; - $xfer += $input->readSetBegin($_etype548, $_size545); - for ($_i549 = 0; $_i549 < $_size545; ++$_i549) + $_size559 = 0; + $_etype562 = 0; + $xfer += $input->readSetBegin($_etype562, $_size559); + for ($_i563 = 0; $_i563 < $_size559; ++$_i563) { - $elem550 = null; - $xfer += $input->readI64($elem550); - if (is_scalar($elem550)) { - $this->nosuch[$elem550] = true; + $elem564 = null; + $xfer += $input->readI64($elem564); + if (is_scalar($elem564)) { + $this->nosuch[$elem564] = true; } else { - $this->nosuch []= $elem550; + $this->nosuch []= $elem564; } } $xfer += $input->readSetEnd(); @@ -16817,12 +17357,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter551 => $iter552) + foreach ($this->aborted as $iter565 => $iter566) { - if (is_scalar($iter552)) { - $xfer += $output->writeI64($iter551); + if (is_scalar($iter566)) { + $xfer += 
$output->writeI64($iter565); } else { - $xfer += $output->writeI64($iter552); + $xfer += $output->writeI64($iter566); } } } @@ -16838,12 +17378,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter553 => $iter554) + foreach ($this->nosuch as $iter567 => $iter568) { - if (is_scalar($iter554)) { - $xfer += $output->writeI64($iter553); + if (is_scalar($iter568)) { + $xfer += $output->writeI64($iter567); } else { - $xfer += $output->writeI64($iter554); + $xfer += $output->writeI64($iter568); } } } @@ -17002,17 +17542,17 @@ class CompactionRequest { case 6: if ($ftype == TType::MAP) { $this->properties = array(); - $_size555 = 0; - $_ktype556 = 0; - $_vtype557 = 0; - $xfer += $input->readMapBegin($_ktype556, $_vtype557, $_size555); - for ($_i559 = 0; $_i559 < $_size555; ++$_i559) + $_size569 = 0; + $_ktype570 = 0; + $_vtype571 = 0; + $xfer += $input->readMapBegin($_ktype570, $_vtype571, $_size569); + for ($_i573 = 0; $_i573 < $_size569; ++$_i573) { - $key560 = ''; - $val561 = ''; - $xfer += $input->readString($key560); - $xfer += $input->readString($val561); - $this->properties[$key560] = $val561; + $key574 = ''; + $val575 = ''; + $xfer += $input->readString($key574); + $xfer += $input->readString($val575); + $this->properties[$key574] = $val575; } $xfer += $input->readMapEnd(); } else { @@ -17065,10 +17605,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter562 => $viter563) + foreach ($this->properties as $kiter576 => $viter577) { - $xfer += $output->writeString($kiter562); - $xfer += $output->writeString($viter563); + $xfer += $output->writeString($kiter576); + $xfer += $output->writeString($viter577); } } $output->writeMapEnd(); @@ -17655,15 +18195,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size564 = 0; - $_etype567 = 0; - $xfer += $input->readListBegin($_etype567, $_size564); - for ($_i568 = 0; $_i568 < $_size564; ++$_i568) + $_size578 = 0; + $_etype581 = 0; + $xfer += $input->readListBegin($_etype581, $_size578); + for ($_i582 = 0; $_i582 < $_size578; ++$_i582) { - $elem569 = null; - $elem569 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem569->read($input); - $this->compacts []= $elem569; + $elem583 = null; + $elem583 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem583->read($input); + $this->compacts []= $elem583; } $xfer += $input->readListEnd(); } else { @@ -17691,9 +18231,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter570) + foreach ($this->compacts as $iter584) { - $xfer += $iter570->write($output); + $xfer += $iter584->write($output); } } $output->writeListEnd(); @@ -17840,14 +18380,14 @@ class AddDynamicPartitions { case 5: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size571 = 0; - $_etype574 = 0; - $xfer += $input->readListBegin($_etype574, $_size571); - for ($_i575 = 0; $_i575 < $_size571; ++$_i575) + $_size585 = 0; + $_etype588 = 0; + $xfer += $input->readListBegin($_etype588, $_size585); + for ($_i589 = 0; $_i589 < $_size585; ++$_i589) { - $elem576 = null; - $xfer += $input->readString($elem576); - $this->partitionnames []= $elem576; + $elem590 = null; + $xfer += $input->readString($elem590); + $this->partitionnames []= $elem590; } $xfer += $input->readListEnd(); } else { @@ -17902,9 +18442,9 @@ class 
AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter577) + foreach ($this->partitionnames as $iter591) { - $xfer += $output->writeString($iter577); + $xfer += $output->writeString($iter591); } } $output->writeListEnd(); @@ -18210,17 +18750,17 @@ class CreationMetadata { case 3: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size578 = 0; - $_etype581 = 0; - $xfer += $input->readSetBegin($_etype581, $_size578); - for ($_i582 = 0; $_i582 < $_size578; ++$_i582) + $_size592 = 0; + $_etype595 = 0; + $xfer += $input->readSetBegin($_etype595, $_size592); + for ($_i596 = 0; $_i596 < $_size592; ++$_i596) { - $elem583 = null; - $xfer += $input->readString($elem583); - if (is_scalar($elem583)) { - $this->tablesUsed[$elem583] = true; + $elem597 = null; + $xfer += $input->readString($elem597); + if (is_scalar($elem597)) { + $this->tablesUsed[$elem597] = true; } else { - $this->tablesUsed []= $elem583; + $this->tablesUsed []= $elem597; } } $xfer += $input->readSetEnd(); @@ -18266,12 +18806,12 @@ class CreationMetadata { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter584 => $iter585) + foreach ($this->tablesUsed as $iter598 => $iter599) { - if (is_scalar($iter585)) { - $xfer += $output->writeString($iter584); + if (is_scalar($iter599)) { + $xfer += $output->writeString($iter598); } else { - $xfer += $output->writeString($iter585); + $xfer += $output->writeString($iter599); } } } @@ -18653,15 +19193,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size586 = 0; - $_etype589 = 0; - $xfer += $input->readListBegin($_etype589, $_size586); - for ($_i590 = 0; $_i590 < $_size586; ++$_i590) + $_size600 = 0; + $_etype603 = 0; + $xfer += $input->readListBegin($_etype603, $_size600); + for ($_i604 = 0; $_i604 < $_size600; ++$_i604) { - $elem591 = null; - $elem591 = new \metastore\NotificationEvent(); - $xfer += $elem591->read($input); - $this->events []= $elem591; + $elem605 = null; + $elem605 = new \metastore\NotificationEvent(); + $xfer += $elem605->read($input); + $this->events []= $elem605; } $xfer += $input->readListEnd(); } else { @@ -18689,9 +19229,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter592) + foreach ($this->events as $iter606) { - $xfer += $iter592->write($output); + $xfer += $iter606->write($output); } } $output->writeListEnd(); @@ -19036,14 +19576,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size593 = 0; - $_etype596 = 0; - $xfer += $input->readListBegin($_etype596, $_size593); - for ($_i597 = 0; $_i597 < $_size593; ++$_i597) + $_size607 = 0; + $_etype610 = 0; + $xfer += $input->readListBegin($_etype610, $_size607); + for ($_i611 = 0; $_i611 < $_size607; ++$_i611) { - $elem598 = null; - $xfer += $input->readString($elem598); - $this->filesAdded []= $elem598; + $elem612 = null; + $xfer += $input->readString($elem612); + $this->filesAdded []= $elem612; } $xfer += $input->readListEnd(); } else { @@ -19053,14 +19593,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size599 = 0; - $_etype602 = 0; - $xfer += $input->readListBegin($_etype602, $_size599); - for ($_i603 = 0; $_i603 < $_size599; ++$_i603) + $_size613 = 0; + $_etype616 = 0; + $xfer += $input->readListBegin($_etype616, 
$_size613); + for ($_i617 = 0; $_i617 < $_size613; ++$_i617) { - $elem604 = null; - $xfer += $input->readString($elem604); - $this->filesAddedChecksum []= $elem604; + $elem618 = null; + $xfer += $input->readString($elem618); + $this->filesAddedChecksum []= $elem618; } $xfer += $input->readListEnd(); } else { @@ -19093,9 +19633,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter605) + foreach ($this->filesAdded as $iter619) { - $xfer += $output->writeString($iter605); + $xfer += $output->writeString($iter619); } } $output->writeListEnd(); @@ -19110,9 +19650,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter606) + foreach ($this->filesAddedChecksum as $iter620) { - $xfer += $output->writeString($iter606); + $xfer += $output->writeString($iter620); } } $output->writeListEnd(); @@ -19330,14 +19870,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size607 = 0; - $_etype610 = 0; - $xfer += $input->readListBegin($_etype610, $_size607); - for ($_i611 = 0; $_i611 < $_size607; ++$_i611) + $_size621 = 0; + $_etype624 = 0; + $xfer += $input->readListBegin($_etype624, $_size621); + for ($_i625 = 0; $_i625 < $_size621; ++$_i625) { - $elem612 = null; - $xfer += $input->readString($elem612); - $this->partitionVals []= $elem612; + $elem626 = null; + $xfer += $input->readString($elem626); + $this->partitionVals []= $elem626; } $xfer += $input->readListEnd(); } else { @@ -19388,9 +19928,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter613) + foreach ($this->partitionVals as $iter627) { - $xfer += $output->writeString($iter613); + $xfer += $output->writeString($iter627); } } $output->writeListEnd(); @@ -19618,18 +20158,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size614 = 0; - $_ktype615 = 0; - $_vtype616 = 0; - $xfer += $input->readMapBegin($_ktype615, $_vtype616, $_size614); - for ($_i618 = 0; $_i618 < $_size614; ++$_i618) + $_size628 = 0; + $_ktype629 = 0; + $_vtype630 = 0; + $xfer += $input->readMapBegin($_ktype629, $_vtype630, $_size628); + for ($_i632 = 0; $_i632 < $_size628; ++$_i632) { - $key619 = 0; - $val620 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key619); - $val620 = new \metastore\MetadataPpdResult(); - $xfer += $val620->read($input); - $this->metadata[$key619] = $val620; + $key633 = 0; + $val634 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key633); + $val634 = new \metastore\MetadataPpdResult(); + $xfer += $val634->read($input); + $this->metadata[$key633] = $val634; } $xfer += $input->readMapEnd(); } else { @@ -19664,10 +20204,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter621 => $viter622) + foreach ($this->metadata as $kiter635 => $viter636) { - $xfer += $output->writeI64($kiter621); - $xfer += $viter622->write($output); + $xfer += $output->writeI64($kiter635); + $xfer += $viter636->write($output); } } $output->writeMapEnd(); @@ -19769,14 +20309,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size623 = 0; - $_etype626 = 0; - $xfer += $input->readListBegin($_etype626, 
$_size623); - for ($_i627 = 0; $_i627 < $_size623; ++$_i627) + $_size637 = 0; + $_etype640 = 0; + $xfer += $input->readListBegin($_etype640, $_size637); + for ($_i641 = 0; $_i641 < $_size637; ++$_i641) { - $elem628 = null; - $xfer += $input->readI64($elem628); - $this->fileIds []= $elem628; + $elem642 = null; + $xfer += $input->readI64($elem642); + $this->fileIds []= $elem642; } $xfer += $input->readListEnd(); } else { @@ -19825,9 +20365,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter629) + foreach ($this->fileIds as $iter643) { - $xfer += $output->writeI64($iter629); + $xfer += $output->writeI64($iter643); } } $output->writeListEnd(); @@ -19921,17 +20461,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size630 = 0; - $_ktype631 = 0; - $_vtype632 = 0; - $xfer += $input->readMapBegin($_ktype631, $_vtype632, $_size630); - for ($_i634 = 0; $_i634 < $_size630; ++$_i634) + $_size644 = 0; + $_ktype645 = 0; + $_vtype646 = 0; + $xfer += $input->readMapBegin($_ktype645, $_vtype646, $_size644); + for ($_i648 = 0; $_i648 < $_size644; ++$_i648) { - $key635 = 0; - $val636 = ''; - $xfer += $input->readI64($key635); - $xfer += $input->readString($val636); - $this->metadata[$key635] = $val636; + $key649 = 0; + $val650 = ''; + $xfer += $input->readI64($key649); + $xfer += $input->readString($val650); + $this->metadata[$key649] = $val650; } $xfer += $input->readMapEnd(); } else { @@ -19966,10 +20506,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter637 => $viter638) + foreach ($this->metadata as $kiter651 => $viter652) { - $xfer += $output->writeI64($kiter637); - $xfer += $output->writeString($viter638); + $xfer += $output->writeI64($kiter651); + $xfer += $output->writeString($viter652); } } $output->writeMapEnd(); @@ -20038,14 +20578,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size639 = 0; - $_etype642 = 0; - $xfer += $input->readListBegin($_etype642, $_size639); - for ($_i643 = 0; $_i643 < $_size639; ++$_i643) + $_size653 = 0; + $_etype656 = 0; + $xfer += $input->readListBegin($_etype656, $_size653); + for ($_i657 = 0; $_i657 < $_size653; ++$_i657) { - $elem644 = null; - $xfer += $input->readI64($elem644); - $this->fileIds []= $elem644; + $elem658 = null; + $xfer += $input->readI64($elem658); + $this->fileIds []= $elem658; } $xfer += $input->readListEnd(); } else { @@ -20073,9 +20613,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter645) + foreach ($this->fileIds as $iter659) { - $xfer += $output->writeI64($iter645); + $xfer += $output->writeI64($iter659); } } $output->writeListEnd(); @@ -20215,14 +20755,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size646 = 0; - $_etype649 = 0; - $xfer += $input->readListBegin($_etype649, $_size646); - for ($_i650 = 0; $_i650 < $_size646; ++$_i650) + $_size660 = 0; + $_etype663 = 0; + $xfer += $input->readListBegin($_etype663, $_size660); + for ($_i664 = 0; $_i664 < $_size660; ++$_i664) { - $elem651 = null; - $xfer += $input->readI64($elem651); - $this->fileIds []= $elem651; + $elem665 = null; + $xfer += $input->readI64($elem665); + $this->fileIds []= $elem665; } $xfer += $input->readListEnd(); } else { @@ -20232,14 +20772,14 @@ 
class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size652 = 0; - $_etype655 = 0; - $xfer += $input->readListBegin($_etype655, $_size652); - for ($_i656 = 0; $_i656 < $_size652; ++$_i656) + $_size666 = 0; + $_etype669 = 0; + $xfer += $input->readListBegin($_etype669, $_size666); + for ($_i670 = 0; $_i670 < $_size666; ++$_i670) { - $elem657 = null; - $xfer += $input->readString($elem657); - $this->metadata []= $elem657; + $elem671 = null; + $xfer += $input->readString($elem671); + $this->metadata []= $elem671; } $xfer += $input->readListEnd(); } else { @@ -20274,9 +20814,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter658) + foreach ($this->fileIds as $iter672) { - $xfer += $output->writeI64($iter658); + $xfer += $output->writeI64($iter672); } } $output->writeListEnd(); @@ -20291,9 +20831,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter659) + foreach ($this->metadata as $iter673) { - $xfer += $output->writeString($iter659); + $xfer += $output->writeString($iter673); } } $output->writeListEnd(); @@ -20412,14 +20952,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size660 = 0; - $_etype663 = 0; - $xfer += $input->readListBegin($_etype663, $_size660); - for ($_i664 = 0; $_i664 < $_size660; ++$_i664) + $_size674 = 0; + $_etype677 = 0; + $xfer += $input->readListBegin($_etype677, $_size674); + for ($_i678 = 0; $_i678 < $_size674; ++$_i678) { - $elem665 = null; - $xfer += $input->readI64($elem665); - $this->fileIds []= $elem665; + $elem679 = null; + $xfer += $input->readI64($elem679); + $this->fileIds []= $elem679; } $xfer += $input->readListEnd(); } else { @@ -20447,9 +20987,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter666) + foreach ($this->fileIds as $iter680) { - $xfer += $output->writeI64($iter666); + $xfer += $output->writeI64($iter680); } } $output->writeListEnd(); @@ -20733,15 +21273,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size667 = 0; - $_etype670 = 0; - $xfer += $input->readListBegin($_etype670, $_size667); - for ($_i671 = 0; $_i671 < $_size667; ++$_i671) + $_size681 = 0; + $_etype684 = 0; + $xfer += $input->readListBegin($_etype684, $_size681); + for ($_i685 = 0; $_i685 < $_size681; ++$_i685) { - $elem672 = null; - $elem672 = new \metastore\Function(); - $xfer += $elem672->read($input); - $this->functions []= $elem672; + $elem686 = null; + $elem686 = new \metastore\Function(); + $xfer += $elem686->read($input); + $this->functions []= $elem686; } $xfer += $input->readListEnd(); } else { @@ -20769,9 +21309,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter673) + foreach ($this->functions as $iter687) { - $xfer += $iter673->write($output); + $xfer += $iter687->write($output); } } $output->writeListEnd(); @@ -20835,14 +21375,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size674 = 0; - $_etype677 = 0; - $xfer += $input->readListBegin($_etype677, $_size674); - for ($_i678 = 0; $_i678 < $_size674; ++$_i678) + $_size688 = 0; + $_etype691 = 0; + $xfer += $input->readListBegin($_etype691, $_size688); + for ($_i692 = 0; $_i692 
< $_size688; ++$_i692) { - $elem679 = null; - $xfer += $input->readI32($elem679); - $this->values []= $elem679; + $elem693 = null; + $xfer += $input->readI32($elem693); + $this->values []= $elem693; } $xfer += $input->readListEnd(); } else { @@ -20870,9 +21410,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter680) + foreach ($this->values as $iter694) { - $xfer += $output->writeI32($iter680); + $xfer += $output->writeI32($iter694); } } $output->writeListEnd(); @@ -21172,14 +21712,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size681 = 0; - $_etype684 = 0; - $xfer += $input->readListBegin($_etype684, $_size681); - for ($_i685 = 0; $_i685 < $_size681; ++$_i685) + $_size695 = 0; + $_etype698 = 0; + $xfer += $input->readListBegin($_etype698, $_size695); + for ($_i699 = 0; $_i699 < $_size695; ++$_i699) { - $elem686 = null; - $xfer += $input->readString($elem686); - $this->tblNames []= $elem686; + $elem700 = null; + $xfer += $input->readString($elem700); + $this->tblNames []= $elem700; } $xfer += $input->readListEnd(); } else { @@ -21220,9 +21760,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter687) + foreach ($this->tblNames as $iter701) { - $xfer += $output->writeString($iter687); + $xfer += $output->writeString($iter701); } } $output->writeListEnd(); @@ -21295,15 +21835,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size688 = 0; - $_etype691 = 0; - $xfer += $input->readListBegin($_etype691, $_size688); - for ($_i692 = 0; $_i692 < $_size688; ++$_i692) + $_size702 = 0; + $_etype705 = 0; + $xfer += $input->readListBegin($_etype705, $_size702); + for ($_i706 = 0; $_i706 < $_size702; ++$_i706) { - $elem693 = null; - $elem693 = new \metastore\Table(); - $xfer += $elem693->read($input); - $this->tables []= $elem693; + $elem707 = null; + $elem707 = new \metastore\Table(); + $xfer += $elem707->read($input); + $this->tables []= $elem707; } $xfer += $input->readListEnd(); } else { @@ -21331,9 +21871,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter694) + foreach ($this->tables as $iter708) { - $xfer += $iter694->write($output); + $xfer += $iter708->write($output); } } $output->writeListEnd(); @@ -21711,17 +22251,17 @@ class Materialization { case 1: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size695 = 0; - $_etype698 = 0; - $xfer += $input->readSetBegin($_etype698, $_size695); - for ($_i699 = 0; $_i699 < $_size695; ++$_i699) + $_size709 = 0; + $_etype712 = 0; + $xfer += $input->readSetBegin($_etype712, $_size709); + for ($_i713 = 0; $_i713 < $_size709; ++$_i713) { - $elem700 = null; - $xfer += $input->readString($elem700); - if (is_scalar($elem700)) { - $this->tablesUsed[$elem700] = true; + $elem714 = null; + $xfer += $input->readString($elem714); + if (is_scalar($elem714)) { + $this->tablesUsed[$elem714] = true; } else { - $this->tablesUsed []= $elem700; + $this->tablesUsed []= $elem714; } } $xfer += $input->readSetEnd(); @@ -21764,12 +22304,12 @@ class Materialization { { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter701 => $iter702) + foreach ($this->tablesUsed as $iter715 => $iter716) { - if (is_scalar($iter702)) { - $xfer += $output->writeString($iter701); + if (is_scalar($iter716)) { + 
$xfer += $output->writeString($iter715); } else { - $xfer += $output->writeString($iter702); + $xfer += $output->writeString($iter716); } } } @@ -23036,15 +23576,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size703 = 0; - $_etype706 = 0; - $xfer += $input->readListBegin($_etype706, $_size703); - for ($_i707 = 0; $_i707 < $_size703; ++$_i707) + $_size717 = 0; + $_etype720 = 0; + $xfer += $input->readListBegin($_etype720, $_size717); + for ($_i721 = 0; $_i721 < $_size717; ++$_i721) { - $elem708 = null; - $elem708 = new \metastore\WMPool(); - $xfer += $elem708->read($input); - $this->pools []= $elem708; + $elem722 = null; + $elem722 = new \metastore\WMPool(); + $xfer += $elem722->read($input); + $this->pools []= $elem722; } $xfer += $input->readListEnd(); } else { @@ -23054,15 +23594,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size709 = 0; - $_etype712 = 0; - $xfer += $input->readListBegin($_etype712, $_size709); - for ($_i713 = 0; $_i713 < $_size709; ++$_i713) + $_size723 = 0; + $_etype726 = 0; + $xfer += $input->readListBegin($_etype726, $_size723); + for ($_i727 = 0; $_i727 < $_size723; ++$_i727) { - $elem714 = null; - $elem714 = new \metastore\WMMapping(); - $xfer += $elem714->read($input); - $this->mappings []= $elem714; + $elem728 = null; + $elem728 = new \metastore\WMMapping(); + $xfer += $elem728->read($input); + $this->mappings []= $elem728; } $xfer += $input->readListEnd(); } else { @@ -23072,15 +23612,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size715 = 0; - $_etype718 = 0; - $xfer += $input->readListBegin($_etype718, $_size715); - for ($_i719 = 0; $_i719 < $_size715; ++$_i719) + $_size729 = 0; + $_etype732 = 0; + $xfer += $input->readListBegin($_etype732, $_size729); + for ($_i733 = 0; $_i733 < $_size729; ++$_i733) { - $elem720 = null; - $elem720 = new \metastore\WMTrigger(); - $xfer += $elem720->read($input); - $this->triggers []= $elem720; + $elem734 = null; + $elem734 = new \metastore\WMTrigger(); + $xfer += $elem734->read($input); + $this->triggers []= $elem734; } $xfer += $input->readListEnd(); } else { @@ -23090,15 +23630,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size721 = 0; - $_etype724 = 0; - $xfer += $input->readListBegin($_etype724, $_size721); - for ($_i725 = 0; $_i725 < $_size721; ++$_i725) + $_size735 = 0; + $_etype738 = 0; + $xfer += $input->readListBegin($_etype738, $_size735); + for ($_i739 = 0; $_i739 < $_size735; ++$_i739) { - $elem726 = null; - $elem726 = new \metastore\WMPoolTrigger(); - $xfer += $elem726->read($input); - $this->poolTriggers []= $elem726; + $elem740 = null; + $elem740 = new \metastore\WMPoolTrigger(); + $xfer += $elem740->read($input); + $this->poolTriggers []= $elem740; } $xfer += $input->readListEnd(); } else { @@ -23134,9 +23674,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter727) + foreach ($this->pools as $iter741) { - $xfer += $iter727->write($output); + $xfer += $iter741->write($output); } } $output->writeListEnd(); @@ -23151,9 +23691,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter728) + foreach ($this->mappings as $iter742) { - $xfer += $iter728->write($output); + $xfer += $iter742->write($output); } } $output->writeListEnd(); @@ -23168,9 +23708,9 
@@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter729) + foreach ($this->triggers as $iter743) { - $xfer += $iter729->write($output); + $xfer += $iter743->write($output); } } $output->writeListEnd(); @@ -23185,9 +23725,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter730) + foreach ($this->poolTriggers as $iter744) { - $xfer += $iter730->write($output); + $xfer += $iter744->write($output); } } $output->writeListEnd(); @@ -23740,15 +24280,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size731 = 0; - $_etype734 = 0; - $xfer += $input->readListBegin($_etype734, $_size731); - for ($_i735 = 0; $_i735 < $_size731; ++$_i735) + $_size745 = 0; + $_etype748 = 0; + $xfer += $input->readListBegin($_etype748, $_size745); + for ($_i749 = 0; $_i749 < $_size745; ++$_i749) { - $elem736 = null; - $elem736 = new \metastore\WMResourcePlan(); - $xfer += $elem736->read($input); - $this->resourcePlans []= $elem736; + $elem750 = null; + $elem750 = new \metastore\WMResourcePlan(); + $xfer += $elem750->read($input); + $this->resourcePlans []= $elem750; } $xfer += $input->readListEnd(); } else { @@ -23776,9 +24316,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter737) + foreach ($this->resourcePlans as $iter751) { - $xfer += $iter737->write($output); + $xfer += $iter751->write($output); } } $output->writeListEnd(); @@ -24184,14 +24724,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size738 = 0; - $_etype741 = 0; - $xfer += $input->readListBegin($_etype741, $_size738); - for ($_i742 = 0; $_i742 < $_size738; ++$_i742) + $_size752 = 0; + $_etype755 = 0; + $xfer += $input->readListBegin($_etype755, $_size752); + for ($_i756 = 0; $_i756 < $_size752; ++$_i756) { - $elem743 = null; - $xfer += $input->readString($elem743); - $this->errors []= $elem743; + $elem757 = null; + $xfer += $input->readString($elem757); + $this->errors []= $elem757; } $xfer += $input->readListEnd(); } else { @@ -24201,14 +24741,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size744 = 0; - $_etype747 = 0; - $xfer += $input->readListBegin($_etype747, $_size744); - for ($_i748 = 0; $_i748 < $_size744; ++$_i748) + $_size758 = 0; + $_etype761 = 0; + $xfer += $input->readListBegin($_etype761, $_size758); + for ($_i762 = 0; $_i762 < $_size758; ++$_i762) { - $elem749 = null; - $xfer += $input->readString($elem749); - $this->warnings []= $elem749; + $elem763 = null; + $xfer += $input->readString($elem763); + $this->warnings []= $elem763; } $xfer += $input->readListEnd(); } else { @@ -24236,9 +24776,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter750) + foreach ($this->errors as $iter764) { - $xfer += $output->writeString($iter750); + $xfer += $output->writeString($iter764); } } $output->writeListEnd(); @@ -24253,9 +24793,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter751) + foreach ($this->warnings as $iter765) { - $xfer += $output->writeString($iter751); + $xfer += $output->writeString($iter765); } 
} $output->writeListEnd(); @@ -24928,15 +25468,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size752 = 0; - $_etype755 = 0; - $xfer += $input->readListBegin($_etype755, $_size752); - for ($_i756 = 0; $_i756 < $_size752; ++$_i756) + $_size766 = 0; + $_etype769 = 0; + $xfer += $input->readListBegin($_etype769, $_size766); + for ($_i770 = 0; $_i770 < $_size766; ++$_i770) { - $elem757 = null; - $elem757 = new \metastore\WMTrigger(); - $xfer += $elem757->read($input); - $this->triggers []= $elem757; + $elem771 = null; + $elem771 = new \metastore\WMTrigger(); + $xfer += $elem771->read($input); + $this->triggers []= $elem771; } $xfer += $input->readListEnd(); } else { @@ -24964,9 +25504,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter758) + foreach ($this->triggers as $iter772) { - $xfer += $iter758->write($output); + $xfer += $iter772->write($output); } } $output->writeListEnd(); diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index b3a0524fa2..72c03971ef 100755 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -42,12 +42,13 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') print(' void create_table(Table tbl)') print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') - print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints)') + print(' void create_table_with_constraints(Table tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints)') print(' void drop_constraint(DropConstraintRequest req)') print(' void add_primary_key(AddPrimaryKeyRequest req)') print(' void add_foreign_key(AddForeignKeyRequest req)') print(' void add_unique_constraint(AddUniqueConstraintRequest req)') print(' void add_not_null_constraint(AddNotNullConstraintRequest req)') + print(' void add_default_constraint(AddDefaultConstraintRequest req)') print(' void drop_table(string dbname, string name, bool deleteData)') print(' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)') print(' void truncate_table(string dbName, string tableName, partNames)') @@ -119,6 +120,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request)') print(' UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request)') print(' NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request)') + print(' DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request)') print(' bool update_table_column_statistics(ColumnStatistics stats_obj)') print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)') print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') @@ -386,10 +388,10 @@ elif cmd == 'create_table_with_environment_context': 
pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),)) elif cmd == 'create_table_with_constraints': - if len(args) != 5: - print('create_table_with_constraints requires 5 args') + if len(args) != 6: + print('create_table_with_constraints requires 6 args') sys.exit(1) - pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),eval(args[4]),)) + pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),eval(args[4]),eval(args[5]),)) elif cmd == 'drop_constraint': if len(args) != 1: @@ -421,6 +423,12 @@ elif cmd == 'add_not_null_constraint': sys.exit(1) pp.pprint(client.add_not_null_constraint(eval(args[0]),)) +elif cmd == 'add_default_constraint': + if len(args) != 1: + print('add_default_constraint requires 1 args') + sys.exit(1) + pp.pprint(client.add_default_constraint(eval(args[0]),)) + elif cmd == 'drop_table': if len(args) != 3: print('drop_table requires 3 args') @@ -847,6 +855,12 @@ elif cmd == 'get_not_null_constraints': sys.exit(1) pp.pprint(client.get_not_null_constraints(eval(args[0]),)) +elif cmd == 'get_default_constraints': + if len(args) != 1: + print('get_default_constraints requires 1 args') + sys.exit(1) + pp.pprint(client.get_default_constraints(eval(args[0]),)) + elif cmd == 'update_table_column_statistics': if len(args) != 1: print('update_table_column_statistics requires 1 args') diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index dfddd4a7c9..392e8ca6dc 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -156,7 +156,7 @@ def create_table_with_environment_context(self, tbl, environment_context): """ pass - def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints): + def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints): """ Parameters: - tbl @@ -164,6 +164,7 @@ def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueCon - foreignKeys - uniqueConstraints - notNullConstraints + - defaultConstraints """ pass @@ -202,6 +203,13 @@ def add_not_null_constraint(self, req): """ pass + def add_default_constraint(self, req): + """ + Parameters: + - req + """ + pass + def drop_table(self, dbname, name, deleteData): """ Parameters: @@ -832,6 +840,13 @@ def get_not_null_constraints(self, request): """ pass + def get_default_constraints(self, request): + """ + Parameters: + - request + """ + pass + def update_table_column_statistics(self, stats_obj): """ Parameters: @@ -2105,7 +2120,7 @@ def recv_create_table_with_environment_context(self): raise result.o4 return - def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints): + def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints): """ Parameters: - tbl @@ -2113,11 +2128,12 @@ def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueCon - foreignKeys - uniqueConstraints - notNullConstraints + - defaultConstraints """ - self.send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints) + 
self.send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints) self.recv_create_table_with_constraints() - def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints): + def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints): self._oprot.writeMessageBegin('create_table_with_constraints', TMessageType.CALL, self._seqid) args = create_table_with_constraints_args() args.tbl = tbl @@ -2125,6 +2141,7 @@ def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniq args.foreignKeys = foreignKeys args.uniqueConstraints = uniqueConstraints args.notNullConstraints = notNullConstraints + args.defaultConstraints = defaultConstraints args.write(self._oprot) self._oprot.writeMessageEnd() self._oprot.trans.flush() @@ -2315,6 +2332,39 @@ def recv_add_not_null_constraint(self): raise result.o2 return + def add_default_constraint(self, req): + """ + Parameters: + - req + """ + self.send_add_default_constraint(req) + self.recv_add_default_constraint() + + def send_add_default_constraint(self, req): + self._oprot.writeMessageBegin('add_default_constraint', TMessageType.CALL, self._seqid) + args = add_default_constraint_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_add_default_constraint(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = add_default_constraint_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + return + def drop_table(self, dbname, name, deleteData): """ Parameters: @@ -5064,6 +5114,41 @@ def recv_get_not_null_constraints(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_not_null_constraints failed: unknown result") + def get_default_constraints(self, request): + """ + Parameters: + - request + """ + self.send_get_default_constraints(request) + return self.recv_get_default_constraints() + + def send_get_default_constraints(self, request): + self._oprot.writeMessageBegin('get_default_constraints', TMessageType.CALL, self._seqid) + args = get_default_constraints_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_default_constraints(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_default_constraints_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_default_constraints failed: unknown result") + def update_table_column_statistics(self, stats_obj): """ Parameters: @@ -8120,6 +8205,7 @@ def __init__(self, handler): self._processMap["add_foreign_key"] = Processor.process_add_foreign_key self._processMap["add_unique_constraint"] = Processor.process_add_unique_constraint self._processMap["add_not_null_constraint"] = 
Processor.process_add_not_null_constraint + self._processMap["add_default_constraint"] = Processor.process_add_default_constraint self._processMap["drop_table"] = Processor.process_drop_table self._processMap["drop_table_with_environment_context"] = Processor.process_drop_table_with_environment_context self._processMap["truncate_table"] = Processor.process_truncate_table @@ -8191,6 +8277,7 @@ def __init__(self, handler): self._processMap["get_foreign_keys"] = Processor.process_get_foreign_keys self._processMap["get_unique_constraints"] = Processor.process_get_unique_constraints self._processMap["get_not_null_constraints"] = Processor.process_get_not_null_constraints + self._processMap["get_default_constraints"] = Processor.process_get_default_constraints self._processMap["update_table_column_statistics"] = Processor.process_update_table_column_statistics self._processMap["update_partition_column_statistics"] = Processor.process_update_partition_column_statistics self._processMap["get_table_column_statistics"] = Processor.process_get_table_column_statistics @@ -8770,7 +8857,7 @@ def process_create_table_with_constraints(self, seqid, iprot, oprot): iprot.readMessageEnd() result = create_table_with_constraints_result() try: - self._handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints) + self._handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -8920,6 +9007,31 @@ def process_add_not_null_constraint(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_add_default_constraint(self, seqid, iprot, oprot): + args = add_default_constraint_args() + args.read(iprot) + iprot.readMessageEnd() + result = add_default_constraint_result() + try: + self._handler.add_default_constraint(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("add_default_constraint", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_drop_table(self, seqid, iprot, oprot): args = drop_table_args() args.read(iprot) @@ -10734,6 +10846,31 @@ def process_get_not_null_constraints(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_default_constraints(self, seqid, iprot, oprot): + args = get_default_constraints_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_default_constraints_result() + try: + result.success = self._handler.get_default_constraints(args.request) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = 
TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_default_constraints", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_update_table_column_statistics(self, seqid, iprot, oprot): args = update_table_column_statistics_args() args.read(iprot) @@ -13728,10 +13865,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype758, _size755) = iprot.readListBegin() - for _i759 in xrange(_size755): - _elem760 = iprot.readString() - self.success.append(_elem760) + (_etype772, _size769) = iprot.readListBegin() + for _i773 in xrange(_size769): + _elem774 = iprot.readString() + self.success.append(_elem774) iprot.readListEnd() else: iprot.skip(ftype) @@ -13754,8 +13891,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter761 in self.success: - oprot.writeString(iter761) + for iter775 in self.success: + oprot.writeString(iter775) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13860,10 +13997,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype765, _size762) = iprot.readListBegin() - for _i766 in xrange(_size762): - _elem767 = iprot.readString() - self.success.append(_elem767) + (_etype779, _size776) = iprot.readListBegin() + for _i780 in xrange(_size776): + _elem781 = iprot.readString() + self.success.append(_elem781) iprot.readListEnd() else: iprot.skip(ftype) @@ -13886,8 +14023,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter768 in self.success: - oprot.writeString(iter768) + for iter782 in self.success: + oprot.writeString(iter782) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14657,12 +14794,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype770, _vtype771, _size769 ) = iprot.readMapBegin() - for _i773 in xrange(_size769): - _key774 = iprot.readString() - _val775 = Type() - _val775.read(iprot) - self.success[_key774] = _val775 + (_ktype784, _vtype785, _size783 ) = iprot.readMapBegin() + for _i787 in xrange(_size783): + _key788 = iprot.readString() + _val789 = Type() + _val789.read(iprot) + self.success[_key788] = _val789 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14685,9 +14822,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter776,viter777 in self.success.items(): - oprot.writeString(kiter776) - viter777.write(oprot) + for kiter790,viter791 in self.success.items(): + oprot.writeString(kiter790) + viter791.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -14830,11 +14967,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype781, _size778) = iprot.readListBegin() - for _i782 in xrange(_size778): - _elem783 = FieldSchema() - _elem783.read(iprot) - self.success.append(_elem783) + (_etype795, _size792) = iprot.readListBegin() + for _i796 in xrange(_size792): + _elem797 = FieldSchema() + _elem797.read(iprot) + self.success.append(_elem797) iprot.readListEnd() else: iprot.skip(ftype) @@ -14869,8 +15006,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', 
TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter784 in self.success: - iter784.write(oprot) + for iter798 in self.success: + iter798.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15037,11 +15174,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype788, _size785) = iprot.readListBegin() - for _i789 in xrange(_size785): - _elem790 = FieldSchema() - _elem790.read(iprot) - self.success.append(_elem790) + (_etype802, _size799) = iprot.readListBegin() + for _i803 in xrange(_size799): + _elem804 = FieldSchema() + _elem804.read(iprot) + self.success.append(_elem804) iprot.readListEnd() else: iprot.skip(ftype) @@ -15076,8 +15213,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter791 in self.success: - iter791.write(oprot) + for iter805 in self.success: + iter805.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15230,11 +15367,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype795, _size792) = iprot.readListBegin() - for _i796 in xrange(_size792): - _elem797 = FieldSchema() - _elem797.read(iprot) - self.success.append(_elem797) + (_etype809, _size806) = iprot.readListBegin() + for _i810 in xrange(_size806): + _elem811 = FieldSchema() + _elem811.read(iprot) + self.success.append(_elem811) iprot.readListEnd() else: iprot.skip(ftype) @@ -15269,8 +15406,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter798 in self.success: - iter798.write(oprot) + for iter812 in self.success: + iter812.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15437,11 +15574,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype802, _size799) = iprot.readListBegin() - for _i803 in xrange(_size799): - _elem804 = FieldSchema() - _elem804.read(iprot) - self.success.append(_elem804) + (_etype816, _size813) = iprot.readListBegin() + for _i817 in xrange(_size813): + _elem818 = FieldSchema() + _elem818.read(iprot) + self.success.append(_elem818) iprot.readListEnd() else: iprot.skip(ftype) @@ -15476,8 +15613,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter805 in self.success: - iter805.write(oprot) + for iter819 in self.success: + iter819.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15888,6 +16025,7 @@ class create_table_with_constraints_args: - foreignKeys - uniqueConstraints - notNullConstraints + - defaultConstraints """ thrift_spec = ( @@ -15897,14 +16035,16 @@ class create_table_with_constraints_args: (3, TType.LIST, 'foreignKeys', (TType.STRUCT,(SQLForeignKey, SQLForeignKey.thrift_spec)), None, ), # 3 (4, TType.LIST, 'uniqueConstraints', (TType.STRUCT,(SQLUniqueConstraint, SQLUniqueConstraint.thrift_spec)), None, ), # 4 (5, TType.LIST, 'notNullConstraints', (TType.STRUCT,(SQLNotNullConstraint, SQLNotNullConstraint.thrift_spec)), None, ), # 5 + (6, TType.LIST, 'defaultConstraints', (TType.STRUCT,(SQLDefaultConstraint, SQLDefaultConstraint.thrift_spec)), None, ), # 6 ) - def __init__(self, tbl=None, primaryKeys=None, foreignKeys=None, uniqueConstraints=None, notNullConstraints=None,): 
+ def __init__(self, tbl=None, primaryKeys=None, foreignKeys=None, uniqueConstraints=None, notNullConstraints=None, defaultConstraints=None,): self.tbl = tbl self.primaryKeys = primaryKeys self.foreignKeys = foreignKeys self.uniqueConstraints = uniqueConstraints self.notNullConstraints = notNullConstraints + self.defaultConstraints = defaultConstraints def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15924,44 +16064,55 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype809, _size806) = iprot.readListBegin() - for _i810 in xrange(_size806): - _elem811 = SQLPrimaryKey() - _elem811.read(iprot) - self.primaryKeys.append(_elem811) + (_etype823, _size820) = iprot.readListBegin() + for _i824 in xrange(_size820): + _elem825 = SQLPrimaryKey() + _elem825.read(iprot) + self.primaryKeys.append(_elem825) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype815, _size812) = iprot.readListBegin() - for _i816 in xrange(_size812): - _elem817 = SQLForeignKey() - _elem817.read(iprot) - self.foreignKeys.append(_elem817) + (_etype829, _size826) = iprot.readListBegin() + for _i830 in xrange(_size826): + _elem831 = SQLForeignKey() + _elem831.read(iprot) + self.foreignKeys.append(_elem831) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype821, _size818) = iprot.readListBegin() - for _i822 in xrange(_size818): - _elem823 = SQLUniqueConstraint() - _elem823.read(iprot) - self.uniqueConstraints.append(_elem823) + (_etype835, _size832) = iprot.readListBegin() + for _i836 in xrange(_size832): + _elem837 = SQLUniqueConstraint() + _elem837.read(iprot) + self.uniqueConstraints.append(_elem837) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype827, _size824) = iprot.readListBegin() - for _i828 in xrange(_size824): - _elem829 = SQLNotNullConstraint() - _elem829.read(iprot) - self.notNullConstraints.append(_elem829) + (_etype841, _size838) = iprot.readListBegin() + for _i842 in xrange(_size838): + _elem843 = SQLNotNullConstraint() + _elem843.read(iprot) + self.notNullConstraints.append(_elem843) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.LIST: + self.defaultConstraints = [] + (_etype847, _size844) = iprot.readListBegin() + for _i848 in xrange(_size844): + _elem849 = SQLDefaultConstraint() + _elem849.read(iprot) + self.defaultConstraints.append(_elem849) iprot.readListEnd() else: iprot.skip(ftype) @@ -15982,29 +16133,36 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter830 in self.primaryKeys: - iter830.write(oprot) + for iter850 in self.primaryKeys: + iter850.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter831 in self.foreignKeys: - iter831.write(oprot) + for iter851 in self.foreignKeys: + iter851.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, 
len(self.uniqueConstraints)) - for iter832 in self.uniqueConstraints: - iter832.write(oprot) + for iter852 in self.uniqueConstraints: + iter852.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter833 in self.notNullConstraints: - iter833.write(oprot) + for iter853 in self.notNullConstraints: + iter853.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.defaultConstraints is not None: + oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) + for iter854 in self.defaultConstraints: + iter854.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16021,6 +16179,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.foreignKeys) value = (value * 31) ^ hash(self.uniqueConstraints) value = (value * 31) ^ hash(self.notNullConstraints) + value = (value * 31) ^ hash(self.defaultConstraints) return value def __repr__(self): @@ -16872,6 +17031,152 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class add_default_constraint_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (AddDefaultConstraintRequest, AddDefaultConstraintRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = AddDefaultConstraintRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_default_constraint_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_default_constraint_result: + """ + Attributes: + - o1 + - o2 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_default_constraint_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class drop_table_args: """ Attributes: @@ -17270,10 +17575,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype837, _size834) = iprot.readListBegin() - for _i838 in xrange(_size834): - _elem839 = iprot.readString() - self.partNames.append(_elem839) + (_etype858, _size855) = iprot.readListBegin() + for _i859 in xrange(_size855): + _elem860 = iprot.readString() + self.partNames.append(_elem860) iprot.readListEnd() else: iprot.skip(ftype) @@ -17298,8 +17603,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter840 in self.partNames: - oprot.writeString(iter840) + for iter861 in self.partNames: + oprot.writeString(iter861) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17499,10 +17804,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype844, _size841) = iprot.readListBegin() - for _i845 in xrange(_size841): - _elem846 = iprot.readString() - self.success.append(_elem846) + (_etype865, _size862) = iprot.readListBegin() + for _i866 in xrange(_size862): + _elem867 = iprot.readString() + self.success.append(_elem867) iprot.readListEnd() else: iprot.skip(ftype) @@ -17525,8 +17830,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter847 in self.success: - oprot.writeString(iter847) + for iter868 in self.success: + oprot.writeString(iter868) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17676,10 +17981,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype851, _size848) = 
iprot.readListBegin() - for _i852 in xrange(_size848): - _elem853 = iprot.readString() - self.success.append(_elem853) + (_etype872, _size869) = iprot.readListBegin() + for _i873 in xrange(_size869): + _elem874 = iprot.readString() + self.success.append(_elem874) iprot.readListEnd() else: iprot.skip(ftype) @@ -17702,8 +18007,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter854 in self.success: - oprot.writeString(iter854) + for iter875 in self.success: + oprot.writeString(iter875) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17827,10 +18132,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype858, _size855) = iprot.readListBegin() - for _i859 in xrange(_size855): - _elem860 = iprot.readString() - self.success.append(_elem860) + (_etype879, _size876) = iprot.readListBegin() + for _i880 in xrange(_size876): + _elem881 = iprot.readString() + self.success.append(_elem881) iprot.readListEnd() else: iprot.skip(ftype) @@ -17853,8 +18158,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter861 in self.success: - oprot.writeString(iter861) + for iter882 in self.success: + oprot.writeString(iter882) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -17927,10 +18232,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype865, _size862) = iprot.readListBegin() - for _i866 in xrange(_size862): - _elem867 = iprot.readString() - self.tbl_types.append(_elem867) + (_etype886, _size883) = iprot.readListBegin() + for _i887 in xrange(_size883): + _elem888 = iprot.readString() + self.tbl_types.append(_elem888) iprot.readListEnd() else: iprot.skip(ftype) @@ -17955,8 +18260,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter868 in self.tbl_types: - oprot.writeString(iter868) + for iter889 in self.tbl_types: + oprot.writeString(iter889) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18012,11 +18317,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype872, _size869) = iprot.readListBegin() - for _i873 in xrange(_size869): - _elem874 = TableMeta() - _elem874.read(iprot) - self.success.append(_elem874) + (_etype893, _size890) = iprot.readListBegin() + for _i894 in xrange(_size890): + _elem895 = TableMeta() + _elem895.read(iprot) + self.success.append(_elem895) iprot.readListEnd() else: iprot.skip(ftype) @@ -18039,8 +18344,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter875 in self.success: - iter875.write(oprot) + for iter896 in self.success: + iter896.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18164,10 +18469,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype879, _size876) = iprot.readListBegin() - for _i880 in xrange(_size876): - _elem881 = iprot.readString() - self.success.append(_elem881) + (_etype900, _size897) = iprot.readListBegin() + for _i901 in xrange(_size897): + _elem902 = iprot.readString() + self.success.append(_elem902) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -18190,8 +18495,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter882 in self.success: - oprot.writeString(iter882) + for iter903 in self.success: + oprot.writeString(iter903) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18427,10 +18732,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype886, _size883) = iprot.readListBegin() - for _i887 in xrange(_size883): - _elem888 = iprot.readString() - self.tbl_names.append(_elem888) + (_etype907, _size904) = iprot.readListBegin() + for _i908 in xrange(_size904): + _elem909 = iprot.readString() + self.tbl_names.append(_elem909) iprot.readListEnd() else: iprot.skip(ftype) @@ -18451,8 +18756,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter889 in self.tbl_names: - oprot.writeString(iter889) + for iter910 in self.tbl_names: + oprot.writeString(iter910) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18504,11 +18809,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype893, _size890) = iprot.readListBegin() - for _i894 in xrange(_size890): - _elem895 = Table() - _elem895.read(iprot) - self.success.append(_elem895) + (_etype914, _size911) = iprot.readListBegin() + for _i915 in xrange(_size911): + _elem916 = Table() + _elem916.read(iprot) + self.success.append(_elem916) iprot.readListEnd() else: iprot.skip(ftype) @@ -18525,8 +18830,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter896 in self.success: - iter896.write(oprot) + for iter917 in self.success: + iter917.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18918,10 +19223,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype900, _size897) = iprot.readListBegin() - for _i901 in xrange(_size897): - _elem902 = iprot.readString() - self.tbl_names.append(_elem902) + (_etype921, _size918) = iprot.readListBegin() + for _i922 in xrange(_size918): + _elem923 = iprot.readString() + self.tbl_names.append(_elem923) iprot.readListEnd() else: iprot.skip(ftype) @@ -18942,8 +19247,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter903 in self.tbl_names: - oprot.writeString(iter903) + for iter924 in self.tbl_names: + oprot.writeString(iter924) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19004,12 +19309,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype905, _vtype906, _size904 ) = iprot.readMapBegin() - for _i908 in xrange(_size904): - _key909 = iprot.readString() - _val910 = Materialization() - _val910.read(iprot) - self.success[_key909] = _val910 + (_ktype926, _vtype927, _size925 ) = iprot.readMapBegin() + for _i929 in xrange(_size925): + _key930 = iprot.readString() + _val931 = Materialization() + _val931.read(iprot) + self.success[_key930] = _val931 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19044,9 +19349,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) 
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter911,viter912 in self.success.items(): - oprot.writeString(kiter911) - viter912.write(oprot) + for kiter932,viter933 in self.success.items(): + oprot.writeString(kiter932) + viter933.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19398,10 +19703,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype916, _size913) = iprot.readListBegin() - for _i917 in xrange(_size913): - _elem918 = iprot.readString() - self.success.append(_elem918) + (_etype937, _size934) = iprot.readListBegin() + for _i938 in xrange(_size934): + _elem939 = iprot.readString() + self.success.append(_elem939) iprot.readListEnd() else: iprot.skip(ftype) @@ -19436,8 +19741,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter919 in self.success: - oprot.writeString(iter919) + for iter940 in self.success: + oprot.writeString(iter940) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20407,11 +20712,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype923, _size920) = iprot.readListBegin() - for _i924 in xrange(_size920): - _elem925 = Partition() - _elem925.read(iprot) - self.new_parts.append(_elem925) + (_etype944, _size941) = iprot.readListBegin() + for _i945 in xrange(_size941): + _elem946 = Partition() + _elem946.read(iprot) + self.new_parts.append(_elem946) iprot.readListEnd() else: iprot.skip(ftype) @@ -20428,8 +20733,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter926 in self.new_parts: - iter926.write(oprot) + for iter947 in self.new_parts: + iter947.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20587,11 +20892,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype930, _size927) = iprot.readListBegin() - for _i931 in xrange(_size927): - _elem932 = PartitionSpec() - _elem932.read(iprot) - self.new_parts.append(_elem932) + (_etype951, _size948) = iprot.readListBegin() + for _i952 in xrange(_size948): + _elem953 = PartitionSpec() + _elem953.read(iprot) + self.new_parts.append(_elem953) iprot.readListEnd() else: iprot.skip(ftype) @@ -20608,8 +20913,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter933 in self.new_parts: - iter933.write(oprot) + for iter954 in self.new_parts: + iter954.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20783,10 +21088,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype937, _size934) = iprot.readListBegin() - for _i938 in xrange(_size934): - _elem939 = iprot.readString() - self.part_vals.append(_elem939) + (_etype958, _size955) = iprot.readListBegin() + for _i959 in xrange(_size955): + _elem960 = iprot.readString() + self.part_vals.append(_elem960) iprot.readListEnd() else: iprot.skip(ftype) @@ -20811,8 +21116,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter940 in self.part_vals: - oprot.writeString(iter940) + for iter961 in self.part_vals: 
+ oprot.writeString(iter961) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21165,10 +21470,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype944, _size941) = iprot.readListBegin() - for _i945 in xrange(_size941): - _elem946 = iprot.readString() - self.part_vals.append(_elem946) + (_etype965, _size962) = iprot.readListBegin() + for _i966 in xrange(_size962): + _elem967 = iprot.readString() + self.part_vals.append(_elem967) iprot.readListEnd() else: iprot.skip(ftype) @@ -21199,8 +21504,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter947 in self.part_vals: - oprot.writeString(iter947) + for iter968 in self.part_vals: + oprot.writeString(iter968) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -21795,10 +22100,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype951, _size948) = iprot.readListBegin() - for _i952 in xrange(_size948): - _elem953 = iprot.readString() - self.part_vals.append(_elem953) + (_etype972, _size969) = iprot.readListBegin() + for _i973 in xrange(_size969): + _elem974 = iprot.readString() + self.part_vals.append(_elem974) iprot.readListEnd() else: iprot.skip(ftype) @@ -21828,8 +22133,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter954 in self.part_vals: - oprot.writeString(iter954) + for iter975 in self.part_vals: + oprot.writeString(iter975) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -22002,10 +22307,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype958, _size955) = iprot.readListBegin() - for _i959 in xrange(_size955): - _elem960 = iprot.readString() - self.part_vals.append(_elem960) + (_etype979, _size976) = iprot.readListBegin() + for _i980 in xrange(_size976): + _elem981 = iprot.readString() + self.part_vals.append(_elem981) iprot.readListEnd() else: iprot.skip(ftype) @@ -22041,8 +22346,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter961 in self.part_vals: - oprot.writeString(iter961) + for iter982 in self.part_vals: + oprot.writeString(iter982) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -22779,10 +23084,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype965, _size962) = iprot.readListBegin() - for _i966 in xrange(_size962): - _elem967 = iprot.readString() - self.part_vals.append(_elem967) + (_etype986, _size983) = iprot.readListBegin() + for _i987 in xrange(_size983): + _elem988 = iprot.readString() + self.part_vals.append(_elem988) iprot.readListEnd() else: iprot.skip(ftype) @@ -22807,8 +23112,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter968 in self.part_vals: - oprot.writeString(iter968) + for iter989 in self.part_vals: + oprot.writeString(iter989) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22967,11 +23272,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype970, _vtype971, 
_size969 ) = iprot.readMapBegin() - for _i973 in xrange(_size969): - _key974 = iprot.readString() - _val975 = iprot.readString() - self.partitionSpecs[_key974] = _val975 + (_ktype991, _vtype992, _size990 ) = iprot.readMapBegin() + for _i994 in xrange(_size990): + _key995 = iprot.readString() + _val996 = iprot.readString() + self.partitionSpecs[_key995] = _val996 iprot.readMapEnd() else: iprot.skip(ftype) @@ -23008,9 +23313,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter976,viter977 in self.partitionSpecs.items(): - oprot.writeString(kiter976) - oprot.writeString(viter977) + for kiter997,viter998 in self.partitionSpecs.items(): + oprot.writeString(kiter997) + oprot.writeString(viter998) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -23215,11 +23520,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype979, _vtype980, _size978 ) = iprot.readMapBegin() - for _i982 in xrange(_size978): - _key983 = iprot.readString() - _val984 = iprot.readString() - self.partitionSpecs[_key983] = _val984 + (_ktype1000, _vtype1001, _size999 ) = iprot.readMapBegin() + for _i1003 in xrange(_size999): + _key1004 = iprot.readString() + _val1005 = iprot.readString() + self.partitionSpecs[_key1004] = _val1005 iprot.readMapEnd() else: iprot.skip(ftype) @@ -23256,9 +23561,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter985,viter986 in self.partitionSpecs.items(): - oprot.writeString(kiter985) - oprot.writeString(viter986) + for kiter1006,viter1007 in self.partitionSpecs.items(): + oprot.writeString(kiter1006) + oprot.writeString(viter1007) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -23341,11 +23646,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype990, _size987) = iprot.readListBegin() - for _i991 in xrange(_size987): - _elem992 = Partition() - _elem992.read(iprot) - self.success.append(_elem992) + (_etype1011, _size1008) = iprot.readListBegin() + for _i1012 in xrange(_size1008): + _elem1013 = Partition() + _elem1013.read(iprot) + self.success.append(_elem1013) iprot.readListEnd() else: iprot.skip(ftype) @@ -23386,8 +23691,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter993 in self.success: - iter993.write(oprot) + for iter1014 in self.success: + iter1014.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23481,10 +23786,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype997, _size994) = iprot.readListBegin() - for _i998 in xrange(_size994): - _elem999 = iprot.readString() - self.part_vals.append(_elem999) + (_etype1018, _size1015) = iprot.readListBegin() + for _i1019 in xrange(_size1015): + _elem1020 = iprot.readString() + self.part_vals.append(_elem1020) iprot.readListEnd() else: iprot.skip(ftype) @@ -23496,10 +23801,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1003, _size1000) = iprot.readListBegin() - for _i1004 in xrange(_size1000): - _elem1005 = iprot.readString() - self.group_names.append(_elem1005) + 
(_etype1024, _size1021) = iprot.readListBegin() + for _i1025 in xrange(_size1021): + _elem1026 = iprot.readString() + self.group_names.append(_elem1026) iprot.readListEnd() else: iprot.skip(ftype) @@ -23524,8 +23829,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1006 in self.part_vals: - oprot.writeString(iter1006) + for iter1027 in self.part_vals: + oprot.writeString(iter1027) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -23535,8 +23840,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1007 in self.group_names: - oprot.writeString(iter1007) + for iter1028 in self.group_names: + oprot.writeString(iter1028) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23965,11 +24270,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1011, _size1008) = iprot.readListBegin() - for _i1012 in xrange(_size1008): - _elem1013 = Partition() - _elem1013.read(iprot) - self.success.append(_elem1013) + (_etype1032, _size1029) = iprot.readListBegin() + for _i1033 in xrange(_size1029): + _elem1034 = Partition() + _elem1034.read(iprot) + self.success.append(_elem1034) iprot.readListEnd() else: iprot.skip(ftype) @@ -23998,8 +24303,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1014 in self.success: - iter1014.write(oprot) + for iter1035 in self.success: + iter1035.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24093,10 +24398,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1018, _size1015) = iprot.readListBegin() - for _i1019 in xrange(_size1015): - _elem1020 = iprot.readString() - self.group_names.append(_elem1020) + (_etype1039, _size1036) = iprot.readListBegin() + for _i1040 in xrange(_size1036): + _elem1041 = iprot.readString() + self.group_names.append(_elem1041) iprot.readListEnd() else: iprot.skip(ftype) @@ -24129,8 +24434,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1021 in self.group_names: - oprot.writeString(iter1021) + for iter1042 in self.group_names: + oprot.writeString(iter1042) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24191,11 +24496,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1025, _size1022) = iprot.readListBegin() - for _i1026 in xrange(_size1022): - _elem1027 = Partition() - _elem1027.read(iprot) - self.success.append(_elem1027) + (_etype1046, _size1043) = iprot.readListBegin() + for _i1047 in xrange(_size1043): + _elem1048 = Partition() + _elem1048.read(iprot) + self.success.append(_elem1048) iprot.readListEnd() else: iprot.skip(ftype) @@ -24224,8 +24529,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1028 in self.success: - iter1028.write(oprot) + for iter1049 in self.success: + iter1049.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24383,11 +24688,11 @@ def read(self, 
iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1032, _size1029) = iprot.readListBegin() - for _i1033 in xrange(_size1029): - _elem1034 = PartitionSpec() - _elem1034.read(iprot) - self.success.append(_elem1034) + (_etype1053, _size1050) = iprot.readListBegin() + for _i1054 in xrange(_size1050): + _elem1055 = PartitionSpec() + _elem1055.read(iprot) + self.success.append(_elem1055) iprot.readListEnd() else: iprot.skip(ftype) @@ -24416,8 +24721,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1035 in self.success: - iter1035.write(oprot) + for iter1056 in self.success: + iter1056.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24575,10 +24880,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1039, _size1036) = iprot.readListBegin() - for _i1040 in xrange(_size1036): - _elem1041 = iprot.readString() - self.success.append(_elem1041) + (_etype1060, _size1057) = iprot.readListBegin() + for _i1061 in xrange(_size1057): + _elem1062 = iprot.readString() + self.success.append(_elem1062) iprot.readListEnd() else: iprot.skip(ftype) @@ -24607,8 +24912,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1042 in self.success: - oprot.writeString(iter1042) + for iter1063 in self.success: + oprot.writeString(iter1063) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24848,10 +25153,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1046, _size1043) = iprot.readListBegin() - for _i1047 in xrange(_size1043): - _elem1048 = iprot.readString() - self.part_vals.append(_elem1048) + (_etype1067, _size1064) = iprot.readListBegin() + for _i1068 in xrange(_size1064): + _elem1069 = iprot.readString() + self.part_vals.append(_elem1069) iprot.readListEnd() else: iprot.skip(ftype) @@ -24881,8 +25186,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1049 in self.part_vals: - oprot.writeString(iter1049) + for iter1070 in self.part_vals: + oprot.writeString(iter1070) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -24946,11 +25251,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1053, _size1050) = iprot.readListBegin() - for _i1054 in xrange(_size1050): - _elem1055 = Partition() - _elem1055.read(iprot) - self.success.append(_elem1055) + (_etype1074, _size1071) = iprot.readListBegin() + for _i1075 in xrange(_size1071): + _elem1076 = Partition() + _elem1076.read(iprot) + self.success.append(_elem1076) iprot.readListEnd() else: iprot.skip(ftype) @@ -24979,8 +25284,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1056 in self.success: - iter1056.write(oprot) + for iter1077 in self.success: + iter1077.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25067,10 +25372,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1060, _size1057) = iprot.readListBegin() - for _i1061 in xrange(_size1057): - _elem1062 = iprot.readString() - 
self.part_vals.append(_elem1062) + (_etype1081, _size1078) = iprot.readListBegin() + for _i1082 in xrange(_size1078): + _elem1083 = iprot.readString() + self.part_vals.append(_elem1083) iprot.readListEnd() else: iprot.skip(ftype) @@ -25087,10 +25392,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1066, _size1063) = iprot.readListBegin() - for _i1067 in xrange(_size1063): - _elem1068 = iprot.readString() - self.group_names.append(_elem1068) + (_etype1087, _size1084) = iprot.readListBegin() + for _i1088 in xrange(_size1084): + _elem1089 = iprot.readString() + self.group_names.append(_elem1089) iprot.readListEnd() else: iprot.skip(ftype) @@ -25115,8 +25420,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1069 in self.part_vals: - oprot.writeString(iter1069) + for iter1090 in self.part_vals: + oprot.writeString(iter1090) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -25130,8 +25435,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1070 in self.group_names: - oprot.writeString(iter1070) + for iter1091 in self.group_names: + oprot.writeString(iter1091) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25193,11 +25498,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1074, _size1071) = iprot.readListBegin() - for _i1075 in xrange(_size1071): - _elem1076 = Partition() - _elem1076.read(iprot) - self.success.append(_elem1076) + (_etype1095, _size1092) = iprot.readListBegin() + for _i1096 in xrange(_size1092): + _elem1097 = Partition() + _elem1097.read(iprot) + self.success.append(_elem1097) iprot.readListEnd() else: iprot.skip(ftype) @@ -25226,8 +25531,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1077 in self.success: - iter1077.write(oprot) + for iter1098 in self.success: + iter1098.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25308,10 +25613,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1081, _size1078) = iprot.readListBegin() - for _i1082 in xrange(_size1078): - _elem1083 = iprot.readString() - self.part_vals.append(_elem1083) + (_etype1102, _size1099) = iprot.readListBegin() + for _i1103 in xrange(_size1099): + _elem1104 = iprot.readString() + self.part_vals.append(_elem1104) iprot.readListEnd() else: iprot.skip(ftype) @@ -25341,8 +25646,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1084 in self.part_vals: - oprot.writeString(iter1084) + for iter1105 in self.part_vals: + oprot.writeString(iter1105) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -25406,10 +25711,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1088, _size1085) = iprot.readListBegin() - for _i1089 in xrange(_size1085): - _elem1090 = iprot.readString() - self.success.append(_elem1090) + (_etype1109, _size1106) = iprot.readListBegin() + for _i1110 in xrange(_size1106): + _elem1111 = iprot.readString() + 
self.success.append(_elem1111) iprot.readListEnd() else: iprot.skip(ftype) @@ -25438,8 +25743,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1091 in self.success: - oprot.writeString(iter1091) + for iter1112 in self.success: + oprot.writeString(iter1112) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25610,11 +25915,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1095, _size1092) = iprot.readListBegin() - for _i1096 in xrange(_size1092): - _elem1097 = Partition() - _elem1097.read(iprot) - self.success.append(_elem1097) + (_etype1116, _size1113) = iprot.readListBegin() + for _i1117 in xrange(_size1113): + _elem1118 = Partition() + _elem1118.read(iprot) + self.success.append(_elem1118) iprot.readListEnd() else: iprot.skip(ftype) @@ -25643,8 +25948,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1098 in self.success: - iter1098.write(oprot) + for iter1119 in self.success: + iter1119.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25815,11 +26120,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1102, _size1099) = iprot.readListBegin() - for _i1103 in xrange(_size1099): - _elem1104 = PartitionSpec() - _elem1104.read(iprot) - self.success.append(_elem1104) + (_etype1123, _size1120) = iprot.readListBegin() + for _i1124 in xrange(_size1120): + _elem1125 = PartitionSpec() + _elem1125.read(iprot) + self.success.append(_elem1125) iprot.readListEnd() else: iprot.skip(ftype) @@ -25848,8 +26153,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1105 in self.success: - iter1105.write(oprot) + for iter1126 in self.success: + iter1126.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26269,10 +26574,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1109, _size1106) = iprot.readListBegin() - for _i1110 in xrange(_size1106): - _elem1111 = iprot.readString() - self.names.append(_elem1111) + (_etype1130, _size1127) = iprot.readListBegin() + for _i1131 in xrange(_size1127): + _elem1132 = iprot.readString() + self.names.append(_elem1132) iprot.readListEnd() else: iprot.skip(ftype) @@ -26297,8 +26602,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1112 in self.names: - oprot.writeString(iter1112) + for iter1133 in self.names: + oprot.writeString(iter1133) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26357,11 +26662,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1116, _size1113) = iprot.readListBegin() - for _i1117 in xrange(_size1113): - _elem1118 = Partition() - _elem1118.read(iprot) - self.success.append(_elem1118) + (_etype1137, _size1134) = iprot.readListBegin() + for _i1138 in xrange(_size1134): + _elem1139 = Partition() + _elem1139.read(iprot) + self.success.append(_elem1139) iprot.readListEnd() else: iprot.skip(ftype) @@ -26390,8 +26695,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1119 in self.success: - iter1119.write(oprot) + for iter1140 in self.success: + iter1140.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26641,11 +26946,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1123, _size1120) = iprot.readListBegin() - for _i1124 in xrange(_size1120): - _elem1125 = Partition() - _elem1125.read(iprot) - self.new_parts.append(_elem1125) + (_etype1144, _size1141) = iprot.readListBegin() + for _i1145 in xrange(_size1141): + _elem1146 = Partition() + _elem1146.read(iprot) + self.new_parts.append(_elem1146) iprot.readListEnd() else: iprot.skip(ftype) @@ -26670,8 +26975,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1126 in self.new_parts: - iter1126.write(oprot) + for iter1147 in self.new_parts: + iter1147.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26824,11 +27129,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1130, _size1127) = iprot.readListBegin() - for _i1131 in xrange(_size1127): - _elem1132 = Partition() - _elem1132.read(iprot) - self.new_parts.append(_elem1132) + (_etype1151, _size1148) = iprot.readListBegin() + for _i1152 in xrange(_size1148): + _elem1153 = Partition() + _elem1153.read(iprot) + self.new_parts.append(_elem1153) iprot.readListEnd() else: iprot.skip(ftype) @@ -26859,8 +27164,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1133 in self.new_parts: - iter1133.write(oprot) + for iter1154 in self.new_parts: + iter1154.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -27204,10 +27509,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1137, _size1134) = iprot.readListBegin() - for _i1138 in xrange(_size1134): - _elem1139 = iprot.readString() - self.part_vals.append(_elem1139) + (_etype1158, _size1155) = iprot.readListBegin() + for _i1159 in xrange(_size1155): + _elem1160 = iprot.readString() + self.part_vals.append(_elem1160) iprot.readListEnd() else: iprot.skip(ftype) @@ -27238,8 +27543,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1140 in self.part_vals: - oprot.writeString(iter1140) + for iter1161 in self.part_vals: + oprot.writeString(iter1161) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -27381,10 +27686,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1144, _size1141) = iprot.readListBegin() - for _i1145 in xrange(_size1141): - _elem1146 = iprot.readString() - self.part_vals.append(_elem1146) + (_etype1165, _size1162) = iprot.readListBegin() + for _i1166 in xrange(_size1162): + _elem1167 = iprot.readString() + self.part_vals.append(_elem1167) iprot.readListEnd() else: iprot.skip(ftype) @@ -27406,8 +27711,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1147 in self.part_vals: - oprot.writeString(iter1147) + for iter1168 in self.part_vals: + 
oprot.writeString(iter1168) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -27765,10 +28070,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1151, _size1148) = iprot.readListBegin() - for _i1152 in xrange(_size1148): - _elem1153 = iprot.readString() - self.success.append(_elem1153) + (_etype1172, _size1169) = iprot.readListBegin() + for _i1173 in xrange(_size1169): + _elem1174 = iprot.readString() + self.success.append(_elem1174) iprot.readListEnd() else: iprot.skip(ftype) @@ -27791,8 +28096,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1154 in self.success: - oprot.writeString(iter1154) + for iter1175 in self.success: + oprot.writeString(iter1175) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27916,11 +28221,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1156, _vtype1157, _size1155 ) = iprot.readMapBegin() - for _i1159 in xrange(_size1155): - _key1160 = iprot.readString() - _val1161 = iprot.readString() - self.success[_key1160] = _val1161 + (_ktype1177, _vtype1178, _size1176 ) = iprot.readMapBegin() + for _i1180 in xrange(_size1176): + _key1181 = iprot.readString() + _val1182 = iprot.readString() + self.success[_key1181] = _val1182 iprot.readMapEnd() else: iprot.skip(ftype) @@ -27943,9 +28248,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1162,viter1163 in self.success.items(): - oprot.writeString(kiter1162) - oprot.writeString(viter1163) + for kiter1183,viter1184 in self.success.items(): + oprot.writeString(kiter1183) + oprot.writeString(viter1184) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28021,11 +28326,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1165, _vtype1166, _size1164 ) = iprot.readMapBegin() - for _i1168 in xrange(_size1164): - _key1169 = iprot.readString() - _val1170 = iprot.readString() - self.part_vals[_key1169] = _val1170 + (_ktype1186, _vtype1187, _size1185 ) = iprot.readMapBegin() + for _i1189 in xrange(_size1185): + _key1190 = iprot.readString() + _val1191 = iprot.readString() + self.part_vals[_key1190] = _val1191 iprot.readMapEnd() else: iprot.skip(ftype) @@ -28055,9 +28360,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1171,viter1172 in self.part_vals.items(): - oprot.writeString(kiter1171) - oprot.writeString(viter1172) + for kiter1192,viter1193 in self.part_vals.items(): + oprot.writeString(kiter1192) + oprot.writeString(viter1193) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -28271,11 +28576,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1174, _vtype1175, _size1173 ) = iprot.readMapBegin() - for _i1177 in xrange(_size1173): - _key1178 = iprot.readString() - _val1179 = iprot.readString() - self.part_vals[_key1178] = _val1179 + (_ktype1195, _vtype1196, _size1194 ) = iprot.readMapBegin() + for _i1198 in xrange(_size1194): + _key1199 = iprot.readString() + _val1200 = iprot.readString() + self.part_vals[_key1199] = _val1200 iprot.readMapEnd() else: iprot.skip(ftype) @@ 
-28305,9 +28610,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1180,viter1181 in self.part_vals.items(): - oprot.writeString(kiter1180) - oprot.writeString(viter1181) + for kiter1201,viter1202 in self.part_vals.items(): + oprot.writeString(kiter1201) + oprot.writeString(viter1202) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -29362,11 +29667,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1185, _size1182) = iprot.readListBegin() - for _i1186 in xrange(_size1182): - _elem1187 = Index() - _elem1187.read(iprot) - self.success.append(_elem1187) + (_etype1206, _size1203) = iprot.readListBegin() + for _i1207 in xrange(_size1203): + _elem1208 = Index() + _elem1208.read(iprot) + self.success.append(_elem1208) iprot.readListEnd() else: iprot.skip(ftype) @@ -29395,8 +29700,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1188 in self.success: - iter1188.write(oprot) + for iter1209 in self.success: + iter1209.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29551,10 +29856,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1192, _size1189) = iprot.readListBegin() - for _i1193 in xrange(_size1189): - _elem1194 = iprot.readString() - self.success.append(_elem1194) + (_etype1213, _size1210) = iprot.readListBegin() + for _i1214 in xrange(_size1210): + _elem1215 = iprot.readString() + self.success.append(_elem1215) iprot.readListEnd() else: iprot.skip(ftype) @@ -29577,8 +29882,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1195 in self.success: - oprot.writeString(iter1195) + for iter1216 in self.success: + oprot.writeString(iter1216) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -30245,6 +30550,165 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class get_default_constraints_args: + """ + Attributes: + - request + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (DefaultConstraintsRequest, DefaultConstraintsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, request=None,): + self.request = request + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = DefaultConstraintsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_default_constraints_args') + if self.request is not None: + 
oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.request) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_default_constraints_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (DefaultConstraintsResponse, DefaultConstraintsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = DefaultConstraintsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_default_constraints_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class update_table_column_statistics_args: """ Attributes: @@ -32762,10 +33226,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1199, _size1196) = iprot.readListBegin() - for _i1200 in 
xrange(_size1196): - _elem1201 = iprot.readString() - self.success.append(_elem1201) + (_etype1220, _size1217) = iprot.readListBegin() + for _i1221 in xrange(_size1217): + _elem1222 = iprot.readString() + self.success.append(_elem1222) iprot.readListEnd() else: iprot.skip(ftype) @@ -32788,8 +33252,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1202 in self.success: - oprot.writeString(iter1202) + for iter1223 in self.success: + oprot.writeString(iter1223) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33477,10 +33941,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1206, _size1203) = iprot.readListBegin() - for _i1207 in xrange(_size1203): - _elem1208 = iprot.readString() - self.success.append(_elem1208) + (_etype1227, _size1224) = iprot.readListBegin() + for _i1228 in xrange(_size1224): + _elem1229 = iprot.readString() + self.success.append(_elem1229) iprot.readListEnd() else: iprot.skip(ftype) @@ -33503,8 +33967,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1209 in self.success: - oprot.writeString(iter1209) + for iter1230 in self.success: + oprot.writeString(iter1230) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34018,11 +34482,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1213, _size1210) = iprot.readListBegin() - for _i1214 in xrange(_size1210): - _elem1215 = Role() - _elem1215.read(iprot) - self.success.append(_elem1215) + (_etype1234, _size1231) = iprot.readListBegin() + for _i1235 in xrange(_size1231): + _elem1236 = Role() + _elem1236.read(iprot) + self.success.append(_elem1236) iprot.readListEnd() else: iprot.skip(ftype) @@ -34045,8 +34509,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1216 in self.success: - iter1216.write(oprot) + for iter1237 in self.success: + iter1237.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34555,10 +35019,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1220, _size1217) = iprot.readListBegin() - for _i1221 in xrange(_size1217): - _elem1222 = iprot.readString() - self.group_names.append(_elem1222) + (_etype1241, _size1238) = iprot.readListBegin() + for _i1242 in xrange(_size1238): + _elem1243 = iprot.readString() + self.group_names.append(_elem1243) iprot.readListEnd() else: iprot.skip(ftype) @@ -34583,8 +35047,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1223 in self.group_names: - oprot.writeString(iter1223) + for iter1244 in self.group_names: + oprot.writeString(iter1244) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34811,11 +35275,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1227, _size1224) = iprot.readListBegin() - for _i1228 in xrange(_size1224): - _elem1229 = HiveObjectPrivilege() - _elem1229.read(iprot) - self.success.append(_elem1229) + (_etype1248, _size1245) = iprot.readListBegin() + for _i1249 in xrange(_size1245): + _elem1250 = 
HiveObjectPrivilege() + _elem1250.read(iprot) + self.success.append(_elem1250) iprot.readListEnd() else: iprot.skip(ftype) @@ -34838,8 +35302,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1230 in self.success: - iter1230.write(oprot) + for iter1251 in self.success: + iter1251.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35337,10 +35801,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1234, _size1231) = iprot.readListBegin() - for _i1235 in xrange(_size1231): - _elem1236 = iprot.readString() - self.group_names.append(_elem1236) + (_etype1255, _size1252) = iprot.readListBegin() + for _i1256 in xrange(_size1252): + _elem1257 = iprot.readString() + self.group_names.append(_elem1257) iprot.readListEnd() else: iprot.skip(ftype) @@ -35361,8 +35825,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1237 in self.group_names: - oprot.writeString(iter1237) + for iter1258 in self.group_names: + oprot.writeString(iter1258) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35417,10 +35881,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1241, _size1238) = iprot.readListBegin() - for _i1242 in xrange(_size1238): - _elem1243 = iprot.readString() - self.success.append(_elem1243) + (_etype1262, _size1259) = iprot.readListBegin() + for _i1263 in xrange(_size1259): + _elem1264 = iprot.readString() + self.success.append(_elem1264) iprot.readListEnd() else: iprot.skip(ftype) @@ -35443,8 +35907,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1244 in self.success: - oprot.writeString(iter1244) + for iter1265 in self.success: + oprot.writeString(iter1265) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36376,10 +36840,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1248, _size1245) = iprot.readListBegin() - for _i1249 in xrange(_size1245): - _elem1250 = iprot.readString() - self.success.append(_elem1250) + (_etype1269, _size1266) = iprot.readListBegin() + for _i1270 in xrange(_size1266): + _elem1271 = iprot.readString() + self.success.append(_elem1271) iprot.readListEnd() else: iprot.skip(ftype) @@ -36396,8 +36860,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1251 in self.success: - oprot.writeString(iter1251) + for iter1272 in self.success: + oprot.writeString(iter1272) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36924,10 +37388,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1255, _size1252) = iprot.readListBegin() - for _i1256 in xrange(_size1252): - _elem1257 = iprot.readString() - self.success.append(_elem1257) + (_etype1276, _size1273) = iprot.readListBegin() + for _i1277 in xrange(_size1273): + _elem1278 = iprot.readString() + self.success.append(_elem1278) iprot.readListEnd() else: iprot.skip(ftype) @@ -36944,8 +37408,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1258 in self.success: - oprot.writeString(iter1258) + for iter1279 in self.success: + oprot.writeString(iter1279) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 0c60aff5aa..883ecfe280 100644 --- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -1154,6 +1154,162 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class SQLDefaultConstraint: + """ + Attributes: + - table_db + - table_name + - column_name + - default_value + - dc_name + - enable_cstr + - validate_cstr + - rely_cstr + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'table_db', None, None, ), # 1 + (2, TType.STRING, 'table_name', None, None, ), # 2 + (3, TType.STRING, 'column_name', None, None, ), # 3 + (4, TType.STRING, 'default_value', None, None, ), # 4 + (5, TType.STRING, 'dc_name', None, None, ), # 5 + (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 + (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 + (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + ) + + def __init__(self, table_db=None, table_name=None, column_name=None, default_value=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.table_db = table_db + self.table_name = table_name + self.column_name = column_name + self.default_value = default_value + self.dc_name = dc_name + self.enable_cstr = enable_cstr + self.validate_cstr = validate_cstr + self.rely_cstr = rely_cstr + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.table_db = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.column_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.default_value = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.dc_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.BOOL: + self.enable_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.BOOL: + self.validate_cstr = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.BOOL: + self.rely_cstr = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SQLDefaultConstraint') + if self.table_db is not None: + oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeString(self.table_db) + 
oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeString(self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeString(self.column_name) + oprot.writeFieldEnd() + if self.default_value is not None: + oprot.writeFieldBegin('default_value', TType.STRING, 4) + oprot.writeString(self.default_value) + oprot.writeFieldEnd() + if self.dc_name is not None: + oprot.writeFieldBegin('dc_name', TType.STRING, 5) + oprot.writeString(self.dc_name) + oprot.writeFieldEnd() + if self.enable_cstr is not None: + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) + oprot.writeBool(self.enable_cstr) + oprot.writeFieldEnd() + if self.validate_cstr is not None: + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) + oprot.writeBool(self.validate_cstr) + oprot.writeFieldEnd() + if self.rely_cstr is not None: + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) + oprot.writeBool(self.rely_cstr) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.table_db) + value = (value * 31) ^ hash(self.table_name) + value = (value * 31) ^ hash(self.column_name) + value = (value * 31) ^ hash(self.default_value) + value = (value * 31) ^ hash(self.dc_name) + value = (value * 31) ^ hash(self.enable_cstr) + value = (value * 31) ^ hash(self.validate_cstr) + value = (value * 31) ^ hash(self.rely_cstr) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Type: """ Attributes: @@ -6982,6 +7138,164 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class DefaultConstraintsRequest: + """ + Attributes: + - db_name + - tbl_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + ) + + def __init__(self, db_name=None, tbl_name=None,): + self.db_name = db_name + self.tbl_name = tbl_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('DefaultConstraintsRequest') + if self.db_name is not None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if 
self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.db_name is None: + raise TProtocol.TProtocolException(message='Required field db_name is unset!') + if self.tbl_name is None: + raise TProtocol.TProtocolException(message='Required field tbl_name is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.db_name) + value = (value * 31) ^ hash(self.tbl_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class DefaultConstraintsResponse: + """ + Attributes: + - defaultConstraints + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'defaultConstraints', (TType.STRUCT,(SQLDefaultConstraint, SQLDefaultConstraint.thrift_spec)), None, ), # 1 + ) + + def __init__(self, defaultConstraints=None,): + self.defaultConstraints = defaultConstraints + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.defaultConstraints = [] + (_etype316, _size313) = iprot.readListBegin() + for _i317 in xrange(_size313): + _elem318 = SQLDefaultConstraint() + _elem318.read(iprot) + self.defaultConstraints.append(_elem318) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('DefaultConstraintsResponse') + if self.defaultConstraints is not None: + oprot.writeFieldBegin('defaultConstraints', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) + for iter319 in self.defaultConstraints: + iter319.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.defaultConstraints is None: + raise TProtocol.TProtocolException(message='Required field defaultConstraints is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.defaultConstraints) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class DropConstraintRequest: """ Attributes: @@ -7105,11 +7419,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeyCols = [] - (_etype316, _size313) = iprot.readListBegin() - for _i317 in xrange(_size313): - _elem318 = 
SQLPrimaryKey() - _elem318.read(iprot) - self.primaryKeyCols.append(_elem318) + (_etype323, _size320) = iprot.readListBegin() + for _i324 in xrange(_size320): + _elem325 = SQLPrimaryKey() + _elem325.read(iprot) + self.primaryKeyCols.append(_elem325) iprot.readListEnd() else: iprot.skip(ftype) @@ -7126,8 +7440,8 @@ def write(self, oprot): if self.primaryKeyCols is not None: oprot.writeFieldBegin('primaryKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeyCols)) - for iter319 in self.primaryKeyCols: - iter319.write(oprot) + for iter326 in self.primaryKeyCols: + iter326.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7181,11 +7495,87 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeyCols = [] - (_etype323, _size320) = iprot.readListBegin() - for _i324 in xrange(_size320): - _elem325 = SQLForeignKey() - _elem325.read(iprot) - self.foreignKeyCols.append(_elem325) + (_etype330, _size327) = iprot.readListBegin() + for _i331 in xrange(_size327): + _elem332 = SQLForeignKey() + _elem332.read(iprot) + self.foreignKeyCols.append(_elem332) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('AddForeignKeyRequest') + if self.foreignKeyCols is not None: + oprot.writeFieldBegin('foreignKeyCols', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeyCols)) + for iter333 in self.foreignKeyCols: + iter333.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.foreignKeyCols is None: + raise TProtocol.TProtocolException(message='Required field foreignKeyCols is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.foreignKeyCols) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class AddUniqueConstraintRequest: + """ + Attributes: + - uniqueConstraintCols + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'uniqueConstraintCols', (TType.STRUCT,(SQLUniqueConstraint, SQLUniqueConstraint.thrift_spec)), None, ), # 1 + ) + + def __init__(self, uniqueConstraintCols=None,): + self.uniqueConstraintCols = uniqueConstraintCols + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.uniqueConstraintCols = [] + (_etype337, _size334) = iprot.readListBegin() + for _i338 in xrange(_size334): + _elem339 = SQLUniqueConstraint() + _elem339.read(iprot) + self.uniqueConstraintCols.append(_elem339) iprot.readListEnd() else: iprot.skip(ftype) @@ 
-7198,26 +7588,26 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('AddForeignKeyRequest') - if self.foreignKeyCols is not None: - oprot.writeFieldBegin('foreignKeyCols', TType.LIST, 1) - oprot.writeListBegin(TType.STRUCT, len(self.foreignKeyCols)) - for iter326 in self.foreignKeyCols: - iter326.write(oprot) + oprot.writeStructBegin('AddUniqueConstraintRequest') + if self.uniqueConstraintCols is not None: + oprot.writeFieldBegin('uniqueConstraintCols', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraintCols)) + for iter340 in self.uniqueConstraintCols: + iter340.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.foreignKeyCols is None: - raise TProtocol.TProtocolException(message='Required field foreignKeyCols is unset!') + if self.uniqueConstraintCols is None: + raise TProtocol.TProtocolException(message='Required field uniqueConstraintCols is unset!') return def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.foreignKeyCols) + value = (value * 31) ^ hash(self.uniqueConstraintCols) return value def __repr__(self): @@ -7231,19 +7621,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class AddUniqueConstraintRequest: +class AddNotNullConstraintRequest: """ Attributes: - - uniqueConstraintCols + - notNullConstraintCols """ thrift_spec = ( None, # 0 - (1, TType.LIST, 'uniqueConstraintCols', (TType.STRUCT,(SQLUniqueConstraint, SQLUniqueConstraint.thrift_spec)), None, ), # 1 + (1, TType.LIST, 'notNullConstraintCols', (TType.STRUCT,(SQLNotNullConstraint, SQLNotNullConstraint.thrift_spec)), None, ), # 1 ) - def __init__(self, uniqueConstraintCols=None,): - self.uniqueConstraintCols = uniqueConstraintCols + def __init__(self, notNullConstraintCols=None,): + self.notNullConstraintCols = notNullConstraintCols def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7256,12 +7646,12 @@ def read(self, iprot): break if fid == 1: if ftype == TType.LIST: - self.uniqueConstraintCols = [] - (_etype330, _size327) = iprot.readListBegin() - for _i331 in xrange(_size327): - _elem332 = SQLUniqueConstraint() - _elem332.read(iprot) - self.uniqueConstraintCols.append(_elem332) + self.notNullConstraintCols = [] + (_etype344, _size341) = iprot.readListBegin() + for _i345 in xrange(_size341): + _elem346 = SQLNotNullConstraint() + _elem346.read(iprot) + self.notNullConstraintCols.append(_elem346) iprot.readListEnd() else: iprot.skip(ftype) @@ -7274,26 +7664,26 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('AddUniqueConstraintRequest') - if self.uniqueConstraintCols is not None: - oprot.writeFieldBegin('uniqueConstraintCols', TType.LIST, 1) - oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraintCols)) - for iter333 in self.uniqueConstraintCols: - iter333.write(oprot) + oprot.writeStructBegin('AddNotNullConstraintRequest') + if self.notNullConstraintCols is not None: + 
oprot.writeFieldBegin('notNullConstraintCols', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraintCols)) + for iter347 in self.notNullConstraintCols: + iter347.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.uniqueConstraintCols is None: - raise TProtocol.TProtocolException(message='Required field uniqueConstraintCols is unset!') + if self.notNullConstraintCols is None: + raise TProtocol.TProtocolException(message='Required field notNullConstraintCols is unset!') return def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.uniqueConstraintCols) + value = (value * 31) ^ hash(self.notNullConstraintCols) return value def __repr__(self): @@ -7307,19 +7697,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class AddNotNullConstraintRequest: +class AddDefaultConstraintRequest: """ Attributes: - - notNullConstraintCols + - defaultConstraintCols """ thrift_spec = ( None, # 0 - (1, TType.LIST, 'notNullConstraintCols', (TType.STRUCT,(SQLNotNullConstraint, SQLNotNullConstraint.thrift_spec)), None, ), # 1 + (1, TType.LIST, 'defaultConstraintCols', (TType.STRUCT,(SQLDefaultConstraint, SQLDefaultConstraint.thrift_spec)), None, ), # 1 ) - def __init__(self, notNullConstraintCols=None,): - self.notNullConstraintCols = notNullConstraintCols + def __init__(self, defaultConstraintCols=None,): + self.defaultConstraintCols = defaultConstraintCols def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7332,12 +7722,12 @@ def read(self, iprot): break if fid == 1: if ftype == TType.LIST: - self.notNullConstraintCols = [] - (_etype337, _size334) = iprot.readListBegin() - for _i338 in xrange(_size334): - _elem339 = SQLNotNullConstraint() - _elem339.read(iprot) - self.notNullConstraintCols.append(_elem339) + self.defaultConstraintCols = [] + (_etype351, _size348) = iprot.readListBegin() + for _i352 in xrange(_size348): + _elem353 = SQLDefaultConstraint() + _elem353.read(iprot) + self.defaultConstraintCols.append(_elem353) iprot.readListEnd() else: iprot.skip(ftype) @@ -7350,26 +7740,26 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('AddNotNullConstraintRequest') - if self.notNullConstraintCols is not None: - oprot.writeFieldBegin('notNullConstraintCols', TType.LIST, 1) - oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraintCols)) - for iter340 in self.notNullConstraintCols: - iter340.write(oprot) + oprot.writeStructBegin('AddDefaultConstraintRequest') + if self.defaultConstraintCols is not None: + oprot.writeFieldBegin('defaultConstraintCols', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraintCols)) + for iter354 in self.defaultConstraintCols: + iter354.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): - if self.notNullConstraintCols is None: - raise TProtocol.TProtocolException(message='Required field notNullConstraintCols is unset!') + if self.defaultConstraintCols is None: + raise TProtocol.TProtocolException(message='Required field defaultConstraintCols is unset!') return def 
__hash__(self): value = 17 - value = (value * 31) ^ hash(self.notNullConstraintCols) + value = (value * 31) ^ hash(self.defaultConstraintCols) return value def __repr__(self): @@ -7412,11 +7802,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype344, _size341) = iprot.readListBegin() - for _i345 in xrange(_size341): - _elem346 = Partition() - _elem346.read(iprot) - self.partitions.append(_elem346) + (_etype358, _size355) = iprot.readListBegin() + for _i359 in xrange(_size355): + _elem360 = Partition() + _elem360.read(iprot) + self.partitions.append(_elem360) iprot.readListEnd() else: iprot.skip(ftype) @@ -7438,8 +7828,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter347 in self.partitions: - iter347.write(oprot) + for iter361 in self.partitions: + iter361.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.hasUnknownPartitions is not None: @@ -7623,11 +8013,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tableStats = [] - (_etype351, _size348) = iprot.readListBegin() - for _i352 in xrange(_size348): - _elem353 = ColumnStatisticsObj() - _elem353.read(iprot) - self.tableStats.append(_elem353) + (_etype365, _size362) = iprot.readListBegin() + for _i366 in xrange(_size362): + _elem367 = ColumnStatisticsObj() + _elem367.read(iprot) + self.tableStats.append(_elem367) iprot.readListEnd() else: iprot.skip(ftype) @@ -7644,8 +8034,8 @@ def write(self, oprot): if self.tableStats is not None: oprot.writeFieldBegin('tableStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) - for iter354 in self.tableStats: - iter354.write(oprot) + for iter368 in self.tableStats: + iter368.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7699,17 +8089,17 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partStats = {} - (_ktype356, _vtype357, _size355 ) = iprot.readMapBegin() - for _i359 in xrange(_size355): - _key360 = iprot.readString() - _val361 = [] - (_etype365, _size362) = iprot.readListBegin() - for _i366 in xrange(_size362): - _elem367 = ColumnStatisticsObj() - _elem367.read(iprot) - _val361.append(_elem367) + (_ktype370, _vtype371, _size369 ) = iprot.readMapBegin() + for _i373 in xrange(_size369): + _key374 = iprot.readString() + _val375 = [] + (_etype379, _size376) = iprot.readListBegin() + for _i380 in xrange(_size376): + _elem381 = ColumnStatisticsObj() + _elem381.read(iprot) + _val375.append(_elem381) iprot.readListEnd() - self.partStats[_key360] = _val361 + self.partStats[_key374] = _val375 iprot.readMapEnd() else: iprot.skip(ftype) @@ -7726,11 +8116,11 @@ def write(self, oprot): if self.partStats is not None: oprot.writeFieldBegin('partStats', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) - for kiter368,viter369 in self.partStats.items(): - oprot.writeString(kiter368) - oprot.writeListBegin(TType.STRUCT, len(viter369)) - for iter370 in viter369: - iter370.write(oprot) + for kiter382,viter383 in self.partStats.items(): + oprot.writeString(kiter382) + oprot.writeListBegin(TType.STRUCT, len(viter383)) + for iter384 in viter383: + iter384.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() @@ -7801,10 +8191,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype374, _size371) = iprot.readListBegin() - for _i375 in 
xrange(_size371): - _elem376 = iprot.readString() - self.colNames.append(_elem376) + (_etype388, _size385) = iprot.readListBegin() + for _i389 in xrange(_size385): + _elem390 = iprot.readString() + self.colNames.append(_elem390) iprot.readListEnd() else: iprot.skip(ftype) @@ -7829,8 +8219,8 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter377 in self.colNames: - oprot.writeString(iter377) + for iter391 in self.colNames: + oprot.writeString(iter391) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7909,20 +8299,20 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype381, _size378) = iprot.readListBegin() - for _i382 in xrange(_size378): - _elem383 = iprot.readString() - self.colNames.append(_elem383) + (_etype395, _size392) = iprot.readListBegin() + for _i396 in xrange(_size392): + _elem397 = iprot.readString() + self.colNames.append(_elem397) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.partNames = [] - (_etype387, _size384) = iprot.readListBegin() - for _i388 in xrange(_size384): - _elem389 = iprot.readString() - self.partNames.append(_elem389) + (_etype401, _size398) = iprot.readListBegin() + for _i402 in xrange(_size398): + _elem403 = iprot.readString() + self.partNames.append(_elem403) iprot.readListEnd() else: iprot.skip(ftype) @@ -7947,15 +8337,15 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter390 in self.colNames: - oprot.writeString(iter390) + for iter404 in self.colNames: + oprot.writeString(iter404) oprot.writeListEnd() oprot.writeFieldEnd() if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter391 in self.partNames: - oprot.writeString(iter391) + for iter405 in self.partNames: + oprot.writeString(iter405) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8018,11 +8408,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype395, _size392) = iprot.readListBegin() - for _i396 in xrange(_size392): - _elem397 = Partition() - _elem397.read(iprot) - self.partitions.append(_elem397) + (_etype409, _size406) = iprot.readListBegin() + for _i410 in xrange(_size406): + _elem411 = Partition() + _elem411.read(iprot) + self.partitions.append(_elem411) iprot.readListEnd() else: iprot.skip(ftype) @@ -8039,8 +8429,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter398 in self.partitions: - iter398.write(oprot) + for iter412 in self.partitions: + iter412.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8114,11 +8504,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.parts = [] - (_etype402, _size399) = iprot.readListBegin() - for _i403 in xrange(_size399): - _elem404 = Partition() - _elem404.read(iprot) - self.parts.append(_elem404) + (_etype416, _size413) = iprot.readListBegin() + for _i417 in xrange(_size413): + _elem418 = Partition() + _elem418.read(iprot) + self.parts.append(_elem418) iprot.readListEnd() else: iprot.skip(ftype) @@ -8153,8 +8543,8 @@ def write(self, oprot): if self.parts is not None: 
oprot.writeFieldBegin('parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.parts)) - for iter405 in self.parts: - iter405.write(oprot) + for iter419 in self.parts: + iter419.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ifNotExists is not None: @@ -8226,11 +8616,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype409, _size406) = iprot.readListBegin() - for _i410 in xrange(_size406): - _elem411 = Partition() - _elem411.read(iprot) - self.partitions.append(_elem411) + (_etype423, _size420) = iprot.readListBegin() + for _i424 in xrange(_size420): + _elem425 = Partition() + _elem425.read(iprot) + self.partitions.append(_elem425) iprot.readListEnd() else: iprot.skip(ftype) @@ -8247,8 +8637,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter412 in self.partitions: - iter412.write(oprot) + for iter426 in self.partitions: + iter426.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8383,21 +8773,21 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.names = [] - (_etype416, _size413) = iprot.readListBegin() - for _i417 in xrange(_size413): - _elem418 = iprot.readString() - self.names.append(_elem418) + (_etype430, _size427) = iprot.readListBegin() + for _i431 in xrange(_size427): + _elem432 = iprot.readString() + self.names.append(_elem432) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.exprs = [] - (_etype422, _size419) = iprot.readListBegin() - for _i423 in xrange(_size419): - _elem424 = DropPartitionsExpr() - _elem424.read(iprot) - self.exprs.append(_elem424) + (_etype436, _size433) = iprot.readListBegin() + for _i437 in xrange(_size433): + _elem438 = DropPartitionsExpr() + _elem438.read(iprot) + self.exprs.append(_elem438) iprot.readListEnd() else: iprot.skip(ftype) @@ -8414,15 +8804,15 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter425 in self.names: - oprot.writeString(iter425) + for iter439 in self.names: + oprot.writeString(iter439) oprot.writeListEnd() oprot.writeFieldEnd() if self.exprs is not None: oprot.writeFieldBegin('exprs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.exprs)) - for iter426 in self.exprs: - iter426.write(oprot) + for iter440 in self.exprs: + iter440.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8670,11 +9060,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partitionKeys = [] - (_etype430, _size427) = iprot.readListBegin() - for _i431 in xrange(_size427): - _elem432 = FieldSchema() - _elem432.read(iprot) - self.partitionKeys.append(_elem432) + (_etype444, _size441) = iprot.readListBegin() + for _i445 in xrange(_size441): + _elem446 = FieldSchema() + _elem446.read(iprot) + self.partitionKeys.append(_elem446) iprot.readListEnd() else: iprot.skip(ftype) @@ -8691,11 +9081,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionOrder = [] - (_etype436, _size433) = iprot.readListBegin() - for _i437 in xrange(_size433): - _elem438 = FieldSchema() - _elem438.read(iprot) - self.partitionOrder.append(_elem438) + (_etype450, _size447) = iprot.readListBegin() + for _i451 in xrange(_size447): + _elem452 = FieldSchema() + _elem452.read(iprot) + 
self.partitionOrder.append(_elem452) iprot.readListEnd() else: iprot.skip(ftype) @@ -8730,8 +9120,8 @@ def write(self, oprot): if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter439 in self.partitionKeys: - iter439.write(oprot) + for iter453 in self.partitionKeys: + iter453.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.applyDistinct is not None: @@ -8745,8 +9135,8 @@ def write(self, oprot): if self.partitionOrder is not None: oprot.writeFieldBegin('partitionOrder', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.partitionOrder)) - for iter440 in self.partitionOrder: - iter440.write(oprot) + for iter454 in self.partitionOrder: + iter454.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ascending is not None: @@ -8819,10 +9209,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.row = [] - (_etype444, _size441) = iprot.readListBegin() - for _i445 in xrange(_size441): - _elem446 = iprot.readString() - self.row.append(_elem446) + (_etype458, _size455) = iprot.readListBegin() + for _i459 in xrange(_size455): + _elem460 = iprot.readString() + self.row.append(_elem460) iprot.readListEnd() else: iprot.skip(ftype) @@ -8839,8 +9229,8 @@ def write(self, oprot): if self.row is not None: oprot.writeFieldBegin('row', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.row)) - for iter447 in self.row: - oprot.writeString(iter447) + for iter461 in self.row: + oprot.writeString(iter461) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8894,11 +9284,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionValues = [] - (_etype451, _size448) = iprot.readListBegin() - for _i452 in xrange(_size448): - _elem453 = PartitionValuesRow() - _elem453.read(iprot) - self.partitionValues.append(_elem453) + (_etype465, _size462) = iprot.readListBegin() + for _i466 in xrange(_size462): + _elem467 = PartitionValuesRow() + _elem467.read(iprot) + self.partitionValues.append(_elem467) iprot.readListEnd() else: iprot.skip(ftype) @@ -8915,8 +9305,8 @@ def write(self, oprot): if self.partitionValues is not None: oprot.writeFieldBegin('partitionValues', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionValues)) - for iter454 in self.partitionValues: - iter454.write(oprot) + for iter468 in self.partitionValues: + iter468.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9104,11 +9494,11 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.resourceUris = [] - (_etype458, _size455) = iprot.readListBegin() - for _i459 in xrange(_size455): - _elem460 = ResourceUri() - _elem460.read(iprot) - self.resourceUris.append(_elem460) + (_etype472, _size469) = iprot.readListBegin() + for _i473 in xrange(_size469): + _elem474 = ResourceUri() + _elem474.read(iprot) + self.resourceUris.append(_elem474) iprot.readListEnd() else: iprot.skip(ftype) @@ -9153,8 +9543,8 @@ def write(self, oprot): if self.resourceUris is not None: oprot.writeFieldBegin('resourceUris', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) - for iter461 in self.resourceUris: - iter461.write(oprot) + for iter475 in self.resourceUris: + iter475.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9398,11 +9788,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype465, _size462) = iprot.readListBegin() 
- for _i466 in xrange(_size462): - _elem467 = TxnInfo() - _elem467.read(iprot) - self.open_txns.append(_elem467) + (_etype479, _size476) = iprot.readListBegin() + for _i480 in xrange(_size476): + _elem481 = TxnInfo() + _elem481.read(iprot) + self.open_txns.append(_elem481) iprot.readListEnd() else: iprot.skip(ftype) @@ -9423,8 +9813,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) - for iter468 in self.open_txns: - iter468.write(oprot) + for iter482 in self.open_txns: + iter482.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9495,10 +9885,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype472, _size469) = iprot.readListBegin() - for _i473 in xrange(_size469): - _elem474 = iprot.readI64() - self.open_txns.append(_elem474) + (_etype486, _size483) = iprot.readListBegin() + for _i487 in xrange(_size483): + _elem488 = iprot.readI64() + self.open_txns.append(_elem488) iprot.readListEnd() else: iprot.skip(ftype) @@ -9529,8 +9919,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.I64, len(self.open_txns)) - for iter475 in self.open_txns: - oprot.writeI64(iter475) + for iter489 in self.open_txns: + oprot.writeI64(iter489) oprot.writeListEnd() oprot.writeFieldEnd() if self.min_open_txn is not None: @@ -9709,10 +10099,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype479, _size476) = iprot.readListBegin() - for _i480 in xrange(_size476): - _elem481 = iprot.readI64() - self.txn_ids.append(_elem481) + (_etype493, _size490) = iprot.readListBegin() + for _i494 in xrange(_size490): + _elem495 = iprot.readI64() + self.txn_ids.append(_elem495) iprot.readListEnd() else: iprot.skip(ftype) @@ -9729,8 +10119,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter482 in self.txn_ids: - oprot.writeI64(iter482) + for iter496 in self.txn_ids: + oprot.writeI64(iter496) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9851,10 +10241,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype486, _size483) = iprot.readListBegin() - for _i487 in xrange(_size483): - _elem488 = iprot.readI64() - self.txn_ids.append(_elem488) + (_etype500, _size497) = iprot.readListBegin() + for _i501 in xrange(_size497): + _elem502 = iprot.readI64() + self.txn_ids.append(_elem502) iprot.readListEnd() else: iprot.skip(ftype) @@ -9871,8 +10261,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter489 in self.txn_ids: - oprot.writeI64(iter489) + for iter503 in self.txn_ids: + oprot.writeI64(iter503) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9996,10 +10386,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fullTableNames = [] - (_etype493, _size490) = iprot.readListBegin() - for _i494 in xrange(_size490): - _elem495 = iprot.readString() - self.fullTableNames.append(_elem495) + (_etype507, _size504) = iprot.readListBegin() + for _i508 in xrange(_size504): + _elem509 = iprot.readString() + self.fullTableNames.append(_elem509) iprot.readListEnd() else: iprot.skip(ftype) @@ -10021,8 +10411,8 @@ def 
write(self, oprot): if self.fullTableNames is not None: oprot.writeFieldBegin('fullTableNames', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fullTableNames)) - for iter496 in self.fullTableNames: - oprot.writeString(iter496) + for iter510 in self.fullTableNames: + oprot.writeString(iter510) oprot.writeListEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -10105,10 +10495,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.invalidWriteIds = [] - (_etype500, _size497) = iprot.readListBegin() - for _i501 in xrange(_size497): - _elem502 = iprot.readI64() - self.invalidWriteIds.append(_elem502) + (_etype514, _size511) = iprot.readListBegin() + for _i515 in xrange(_size511): + _elem516 = iprot.readI64() + self.invalidWriteIds.append(_elem516) iprot.readListEnd() else: iprot.skip(ftype) @@ -10143,8 +10533,8 @@ def write(self, oprot): if self.invalidWriteIds is not None: oprot.writeFieldBegin('invalidWriteIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.invalidWriteIds)) - for iter503 in self.invalidWriteIds: - oprot.writeI64(iter503) + for iter517 in self.invalidWriteIds: + oprot.writeI64(iter517) oprot.writeListEnd() oprot.writeFieldEnd() if self.minOpenWriteId is not None: @@ -10216,11 +10606,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tblValidWriteIds = [] - (_etype507, _size504) = iprot.readListBegin() - for _i508 in xrange(_size504): - _elem509 = TableValidWriteIds() - _elem509.read(iprot) - self.tblValidWriteIds.append(_elem509) + (_etype521, _size518) = iprot.readListBegin() + for _i522 in xrange(_size518): + _elem523 = TableValidWriteIds() + _elem523.read(iprot) + self.tblValidWriteIds.append(_elem523) iprot.readListEnd() else: iprot.skip(ftype) @@ -10237,8 +10627,8 @@ def write(self, oprot): if self.tblValidWriteIds is not None: oprot.writeFieldBegin('tblValidWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) - for iter510 in self.tblValidWriteIds: - iter510.write(oprot) + for iter524 in self.tblValidWriteIds: + iter524.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10298,10 +10688,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnIds = [] - (_etype514, _size511) = iprot.readListBegin() - for _i515 in xrange(_size511): - _elem516 = iprot.readI64() - self.txnIds.append(_elem516) + (_etype528, _size525) = iprot.readListBegin() + for _i529 in xrange(_size525): + _elem530 = iprot.readI64() + self.txnIds.append(_elem530) iprot.readListEnd() else: iprot.skip(ftype) @@ -10328,8 +10718,8 @@ def write(self, oprot): if self.txnIds is not None: oprot.writeFieldBegin('txnIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txnIds)) - for iter517 in self.txnIds: - oprot.writeI64(iter517) + for iter531 in self.txnIds: + oprot.writeI64(iter531) oprot.writeListEnd() oprot.writeFieldEnd() if self.dbName is not None: @@ -10479,11 +10869,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnToWriteIds = [] - (_etype521, _size518) = iprot.readListBegin() - for _i522 in xrange(_size518): - _elem523 = TxnToWriteId() - _elem523.read(iprot) - self.txnToWriteIds.append(_elem523) + (_etype535, _size532) = iprot.readListBegin() + for _i536 in xrange(_size532): + _elem537 = TxnToWriteId() + _elem537.read(iprot) + self.txnToWriteIds.append(_elem537) iprot.readListEnd() else: iprot.skip(ftype) @@ -10500,8 +10890,8 @@ def write(self, oprot): if self.txnToWriteIds is not None: 
oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) - for iter524 in self.txnToWriteIds: - iter524.write(oprot) + for iter538 in self.txnToWriteIds: + iter538.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10729,11 +11119,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype528, _size525) = iprot.readListBegin() - for _i529 in xrange(_size525): - _elem530 = LockComponent() - _elem530.read(iprot) - self.component.append(_elem530) + (_etype542, _size539) = iprot.readListBegin() + for _i543 in xrange(_size539): + _elem544 = LockComponent() + _elem544.read(iprot) + self.component.append(_elem544) iprot.readListEnd() else: iprot.skip(ftype) @@ -10770,8 +11160,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter531 in self.component: - iter531.write(oprot) + for iter545 in self.component: + iter545.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -11469,11 +11859,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype535, _size532) = iprot.readListBegin() - for _i536 in xrange(_size532): - _elem537 = ShowLocksResponseElement() - _elem537.read(iprot) - self.locks.append(_elem537) + (_etype549, _size546) = iprot.readListBegin() + for _i550 in xrange(_size546): + _elem551 = ShowLocksResponseElement() + _elem551.read(iprot) + self.locks.append(_elem551) iprot.readListEnd() else: iprot.skip(ftype) @@ -11490,8 +11880,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter538 in self.locks: - iter538.write(oprot) + for iter552 in self.locks: + iter552.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11706,20 +12096,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype542, _size539) = iprot.readSetBegin() - for _i543 in xrange(_size539): - _elem544 = iprot.readI64() - self.aborted.add(_elem544) + (_etype556, _size553) = iprot.readSetBegin() + for _i557 in xrange(_size553): + _elem558 = iprot.readI64() + self.aborted.add(_elem558) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype548, _size545) = iprot.readSetBegin() - for _i549 in xrange(_size545): - _elem550 = iprot.readI64() - self.nosuch.add(_elem550) + (_etype562, _size559) = iprot.readSetBegin() + for _i563 in xrange(_size559): + _elem564 = iprot.readI64() + self.nosuch.add(_elem564) iprot.readSetEnd() else: iprot.skip(ftype) @@ -11736,15 +12126,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter551 in self.aborted: - oprot.writeI64(iter551) + for iter565 in self.aborted: + oprot.writeI64(iter565) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter552 in self.nosuch: - oprot.writeI64(iter552) + for iter566 in self.nosuch: + oprot.writeI64(iter566) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11841,11 +12231,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype554, _vtype555, 
_size553 ) = iprot.readMapBegin() - for _i557 in xrange(_size553): - _key558 = iprot.readString() - _val559 = iprot.readString() - self.properties[_key558] = _val559 + (_ktype568, _vtype569, _size567 ) = iprot.readMapBegin() + for _i571 in xrange(_size567): + _key572 = iprot.readString() + _val573 = iprot.readString() + self.properties[_key572] = _val573 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11882,9 +12272,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter560,viter561 in self.properties.items(): - oprot.writeString(kiter560) - oprot.writeString(viter561) + for kiter574,viter575 in self.properties.items(): + oprot.writeString(kiter574) + oprot.writeString(viter575) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12319,11 +12709,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype565, _size562) = iprot.readListBegin() - for _i566 in xrange(_size562): - _elem567 = ShowCompactResponseElement() - _elem567.read(iprot) - self.compacts.append(_elem567) + (_etype579, _size576) = iprot.readListBegin() + for _i580 in xrange(_size576): + _elem581 = ShowCompactResponseElement() + _elem581.read(iprot) + self.compacts.append(_elem581) iprot.readListEnd() else: iprot.skip(ftype) @@ -12340,8 +12730,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter568 in self.compacts: - iter568.write(oprot) + for iter582 in self.compacts: + iter582.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12430,10 +12820,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionnames = [] - (_etype572, _size569) = iprot.readListBegin() - for _i573 in xrange(_size569): - _elem574 = iprot.readString() - self.partitionnames.append(_elem574) + (_etype586, _size583) = iprot.readListBegin() + for _i587 in xrange(_size583): + _elem588 = iprot.readString() + self.partitionnames.append(_elem588) iprot.readListEnd() else: iprot.skip(ftype) @@ -12471,8 +12861,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter575 in self.partitionnames: - oprot.writeString(iter575) + for iter589 in self.partitionnames: + oprot.writeString(iter589) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -12694,10 +13084,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.SET: self.tablesUsed = set() - (_etype579, _size576) = iprot.readSetBegin() - for _i580 in xrange(_size576): - _elem581 = iprot.readString() - self.tablesUsed.add(_elem581) + (_etype593, _size590) = iprot.readSetBegin() + for _i594 in xrange(_size590): + _elem595 = iprot.readString() + self.tablesUsed.add(_elem595) iprot.readSetEnd() else: iprot.skip(ftype) @@ -12727,8 +13117,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 3) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter582 in self.tablesUsed: - oprot.writeString(iter582) + for iter596 in self.tablesUsed: + oprot.writeString(iter596) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -13024,11 +13414,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events 
= [] - (_etype586, _size583) = iprot.readListBegin() - for _i587 in xrange(_size583): - _elem588 = NotificationEvent() - _elem588.read(iprot) - self.events.append(_elem588) + (_etype600, _size597) = iprot.readListBegin() + for _i601 in xrange(_size597): + _elem602 = NotificationEvent() + _elem602.read(iprot) + self.events.append(_elem602) iprot.readListEnd() else: iprot.skip(ftype) @@ -13045,8 +13435,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter589 in self.events: - iter589.write(oprot) + for iter603 in self.events: + iter603.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13327,20 +13717,20 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype593, _size590) = iprot.readListBegin() - for _i594 in xrange(_size590): - _elem595 = iprot.readString() - self.filesAdded.append(_elem595) + (_etype607, _size604) = iprot.readListBegin() + for _i608 in xrange(_size604): + _elem609 = iprot.readString() + self.filesAdded.append(_elem609) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype599, _size596) = iprot.readListBegin() - for _i600 in xrange(_size596): - _elem601 = iprot.readString() - self.filesAddedChecksum.append(_elem601) + (_etype613, _size610) = iprot.readListBegin() + for _i614 in xrange(_size610): + _elem615 = iprot.readString() + self.filesAddedChecksum.append(_elem615) iprot.readListEnd() else: iprot.skip(ftype) @@ -13361,15 +13751,15 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter602 in self.filesAdded: - oprot.writeString(iter602) + for iter616 in self.filesAdded: + oprot.writeString(iter616) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter603 in self.filesAddedChecksum: - oprot.writeString(iter603) + for iter617 in self.filesAddedChecksum: + oprot.writeString(iter617) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13524,10 +13914,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype607, _size604) = iprot.readListBegin() - for _i608 in xrange(_size604): - _elem609 = iprot.readString() - self.partitionVals.append(_elem609) + (_etype621, _size618) = iprot.readListBegin() + for _i622 in xrange(_size618): + _elem623 = iprot.readString() + self.partitionVals.append(_elem623) iprot.readListEnd() else: iprot.skip(ftype) @@ -13560,8 +13950,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter610 in self.partitionVals: - oprot.writeString(iter610) + for iter624 in self.partitionVals: + oprot.writeString(iter624) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13748,12 +14138,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype612, _vtype613, _size611 ) = iprot.readMapBegin() - for _i615 in xrange(_size611): - _key616 = iprot.readI64() - _val617 = MetadataPpdResult() - _val617.read(iprot) - self.metadata[_key616] = _val617 + (_ktype626, _vtype627, _size625 ) = 
iprot.readMapBegin() + for _i629 in xrange(_size625): + _key630 = iprot.readI64() + _val631 = MetadataPpdResult() + _val631.read(iprot) + self.metadata[_key630] = _val631 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13775,9 +14165,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter618,viter619 in self.metadata.items(): - oprot.writeI64(kiter618) - viter619.write(oprot) + for kiter632,viter633 in self.metadata.items(): + oprot.writeI64(kiter632) + viter633.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -13847,10 +14237,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype623, _size620) = iprot.readListBegin() - for _i624 in xrange(_size620): - _elem625 = iprot.readI64() - self.fileIds.append(_elem625) + (_etype637, _size634) = iprot.readListBegin() + for _i638 in xrange(_size634): + _elem639 = iprot.readI64() + self.fileIds.append(_elem639) iprot.readListEnd() else: iprot.skip(ftype) @@ -13882,8 +14272,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter626 in self.fileIds: - oprot.writeI64(iter626) + for iter640 in self.fileIds: + oprot.writeI64(iter640) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -13957,11 +14347,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype628, _vtype629, _size627 ) = iprot.readMapBegin() - for _i631 in xrange(_size627): - _key632 = iprot.readI64() - _val633 = iprot.readString() - self.metadata[_key632] = _val633 + (_ktype642, _vtype643, _size641 ) = iprot.readMapBegin() + for _i645 in xrange(_size641): + _key646 = iprot.readI64() + _val647 = iprot.readString() + self.metadata[_key646] = _val647 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13983,9 +14373,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter634,viter635 in self.metadata.items(): - oprot.writeI64(kiter634) - oprot.writeString(viter635) + for kiter648,viter649 in self.metadata.items(): + oprot.writeI64(kiter648) + oprot.writeString(viter649) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -14046,10 +14436,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype639, _size636) = iprot.readListBegin() - for _i640 in xrange(_size636): - _elem641 = iprot.readI64() - self.fileIds.append(_elem641) + (_etype653, _size650) = iprot.readListBegin() + for _i654 in xrange(_size650): + _elem655 = iprot.readI64() + self.fileIds.append(_elem655) iprot.readListEnd() else: iprot.skip(ftype) @@ -14066,8 +14456,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter642 in self.fileIds: - oprot.writeI64(iter642) + for iter656 in self.fileIds: + oprot.writeI64(iter656) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14173,20 +14563,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype646, _size643) = iprot.readListBegin() - for _i647 in xrange(_size643): - _elem648 = iprot.readI64() - self.fileIds.append(_elem648) + (_etype660, _size657) = iprot.readListBegin() 
+ for _i661 in xrange(_size657): + _elem662 = iprot.readI64() + self.fileIds.append(_elem662) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype652, _size649) = iprot.readListBegin() - for _i653 in xrange(_size649): - _elem654 = iprot.readString() - self.metadata.append(_elem654) + (_etype666, _size663) = iprot.readListBegin() + for _i667 in xrange(_size663): + _elem668 = iprot.readString() + self.metadata.append(_elem668) iprot.readListEnd() else: iprot.skip(ftype) @@ -14208,15 +14598,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter655 in self.fileIds: - oprot.writeI64(iter655) + for iter669 in self.fileIds: + oprot.writeI64(iter669) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter656 in self.metadata: - oprot.writeString(iter656) + for iter670 in self.metadata: + oprot.writeString(iter670) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -14324,10 +14714,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype660, _size657) = iprot.readListBegin() - for _i661 in xrange(_size657): - _elem662 = iprot.readI64() - self.fileIds.append(_elem662) + (_etype674, _size671) = iprot.readListBegin() + for _i675 in xrange(_size671): + _elem676 = iprot.readI64() + self.fileIds.append(_elem676) iprot.readListEnd() else: iprot.skip(ftype) @@ -14344,8 +14734,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter663 in self.fileIds: - oprot.writeI64(iter663) + for iter677 in self.fileIds: + oprot.writeI64(iter677) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14574,11 +14964,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype667, _size664) = iprot.readListBegin() - for _i668 in xrange(_size664): - _elem669 = Function() - _elem669.read(iprot) - self.functions.append(_elem669) + (_etype681, _size678) = iprot.readListBegin() + for _i682 in xrange(_size678): + _elem683 = Function() + _elem683.read(iprot) + self.functions.append(_elem683) iprot.readListEnd() else: iprot.skip(ftype) @@ -14595,8 +14985,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter670 in self.functions: - iter670.write(oprot) + for iter684 in self.functions: + iter684.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14648,10 +15038,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype674, _size671) = iprot.readListBegin() - for _i675 in xrange(_size671): - _elem676 = iprot.readI32() - self.values.append(_elem676) + (_etype688, _size685) = iprot.readListBegin() + for _i689 in xrange(_size685): + _elem690 = iprot.readI32() + self.values.append(_elem690) iprot.readListEnd() else: iprot.skip(ftype) @@ -14668,8 +15058,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter677 in self.values: - oprot.writeI32(iter677) + for iter691 in self.values: + oprot.writeI32(iter691) oprot.writeListEnd() 
oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14898,10 +15288,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype681, _size678) = iprot.readListBegin() - for _i682 in xrange(_size678): - _elem683 = iprot.readString() - self.tblNames.append(_elem683) + (_etype695, _size692) = iprot.readListBegin() + for _i696 in xrange(_size692): + _elem697 = iprot.readString() + self.tblNames.append(_elem697) iprot.readListEnd() else: iprot.skip(ftype) @@ -14928,8 +15318,8 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter684 in self.tblNames: - oprot.writeString(iter684) + for iter698 in self.tblNames: + oprot.writeString(iter698) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: @@ -14989,11 +15379,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype688, _size685) = iprot.readListBegin() - for _i689 in xrange(_size685): - _elem690 = Table() - _elem690.read(iprot) - self.tables.append(_elem690) + (_etype702, _size699) = iprot.readListBegin() + for _i703 in xrange(_size699): + _elem704 = Table() + _elem704.read(iprot) + self.tables.append(_elem704) iprot.readListEnd() else: iprot.skip(ftype) @@ -15010,8 +15400,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter691 in self.tables: - iter691.write(oprot) + for iter705 in self.tables: + iter705.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15309,10 +15699,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.tablesUsed = set() - (_etype695, _size692) = iprot.readSetBegin() - for _i696 in xrange(_size692): - _elem697 = iprot.readString() - self.tablesUsed.add(_elem697) + (_etype709, _size706) = iprot.readSetBegin() + for _i710 in xrange(_size706): + _elem711 = iprot.readString() + self.tablesUsed.add(_elem711) iprot.readSetEnd() else: iprot.skip(ftype) @@ -15339,8 +15729,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 1) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter698 in self.tablesUsed: - oprot.writeString(iter698) + for iter712 in self.tablesUsed: + oprot.writeString(iter712) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -16244,44 +16634,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype702, _size699) = iprot.readListBegin() - for _i703 in xrange(_size699): - _elem704 = WMPool() - _elem704.read(iprot) - self.pools.append(_elem704) + (_etype716, _size713) = iprot.readListBegin() + for _i717 in xrange(_size713): + _elem718 = WMPool() + _elem718.read(iprot) + self.pools.append(_elem718) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype708, _size705) = iprot.readListBegin() - for _i709 in xrange(_size705): - _elem710 = WMMapping() - _elem710.read(iprot) - self.mappings.append(_elem710) + (_etype722, _size719) = iprot.readListBegin() + for _i723 in xrange(_size719): + _elem724 = WMMapping() + _elem724.read(iprot) + self.mappings.append(_elem724) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype714, _size711) = iprot.readListBegin() - for _i715 in xrange(_size711): - _elem716 = WMTrigger() 
- _elem716.read(iprot) - self.triggers.append(_elem716) + (_etype728, _size725) = iprot.readListBegin() + for _i729 in xrange(_size725): + _elem730 = WMTrigger() + _elem730.read(iprot) + self.triggers.append(_elem730) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype720, _size717) = iprot.readListBegin() - for _i721 in xrange(_size717): - _elem722 = WMPoolTrigger() - _elem722.read(iprot) - self.poolTriggers.append(_elem722) + (_etype734, _size731) = iprot.readListBegin() + for _i735 in xrange(_size731): + _elem736 = WMPoolTrigger() + _elem736.read(iprot) + self.poolTriggers.append(_elem736) iprot.readListEnd() else: iprot.skip(ftype) @@ -16302,29 +16692,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter723 in self.pools: - iter723.write(oprot) + for iter737 in self.pools: + iter737.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter724 in self.mappings: - iter724.write(oprot) + for iter738 in self.mappings: + iter738.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter725 in self.triggers: - iter725.write(oprot) + for iter739 in self.triggers: + iter739.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter726 in self.poolTriggers: - iter726.write(oprot) + for iter740 in self.poolTriggers: + iter740.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16798,11 +17188,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype730, _size727) = iprot.readListBegin() - for _i731 in xrange(_size727): - _elem732 = WMResourcePlan() - _elem732.read(iprot) - self.resourcePlans.append(_elem732) + (_etype744, _size741) = iprot.readListBegin() + for _i745 in xrange(_size741): + _elem746 = WMResourcePlan() + _elem746.read(iprot) + self.resourcePlans.append(_elem746) iprot.readListEnd() else: iprot.skip(ftype) @@ -16819,8 +17209,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter733 in self.resourcePlans: - iter733.write(oprot) + for iter747 in self.resourcePlans: + iter747.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17124,20 +17514,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype737, _size734) = iprot.readListBegin() - for _i738 in xrange(_size734): - _elem739 = iprot.readString() - self.errors.append(_elem739) + (_etype751, _size748) = iprot.readListBegin() + for _i752 in xrange(_size748): + _elem753 = iprot.readString() + self.errors.append(_elem753) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype743, _size740) = iprot.readListBegin() - for _i744 in xrange(_size740): - _elem745 = iprot.readString() - self.warnings.append(_elem745) + (_etype757, _size754) = iprot.readListBegin() + for _i758 in xrange(_size754): + _elem759 = 
iprot.readString() + self.warnings.append(_elem759) iprot.readListEnd() else: iprot.skip(ftype) @@ -17154,15 +17544,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter746 in self.errors: - oprot.writeString(iter746) + for iter760 in self.errors: + oprot.writeString(iter760) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter747 in self.warnings: - oprot.writeString(iter747) + for iter761 in self.warnings: + oprot.writeString(iter761) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17739,11 +18129,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype751, _size748) = iprot.readListBegin() - for _i752 in xrange(_size748): - _elem753 = WMTrigger() - _elem753.read(iprot) - self.triggers.append(_elem753) + (_etype765, _size762) = iprot.readListBegin() + for _i766 in xrange(_size762): + _elem767 = WMTrigger() + _elem767.read(iprot) + self.triggers.append(_elem767) iprot.readListEnd() else: iprot.skip(ftype) @@ -17760,8 +18150,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter754 in self.triggers: - iter754.write(oprot) + for iter768 in self.triggers: + iter768.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 625baae566..8438728caa 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -308,6 +308,36 @@ class SQLNotNullConstraint ::Thrift::Struct.generate_accessors self end +class SQLDefaultConstraint + include ::Thrift::Struct, ::Thrift::Struct_Union + TABLE_DB = 1 + TABLE_NAME = 2 + COLUMN_NAME = 3 + DEFAULT_VALUE = 4 + DC_NAME = 5 + ENABLE_CSTR = 6 + VALIDATE_CSTR = 7 + RELY_CSTR = 8 + + FIELDS = { + TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, + TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, + COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, + DEFAULT_VALUE => {:type => ::Thrift::Types::STRING, :name => 'default_value'}, + DC_NAME => {:type => ::Thrift::Types::STRING, :name => 'dc_name'}, + ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'}, + VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'}, + RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class Type include ::Thrift::Struct, ::Thrift::Struct_Union NAME = 1 @@ -1557,6 +1587,43 @@ class NotNullConstraintsResponse ::Thrift::Struct.generate_accessors self end +class DefaultConstraintsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DB_NAME = 1 + TBL_NAME = 2 + + FIELDS = { + DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is 
unset!') unless @db_name + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name + end + + ::Thrift::Struct.generate_accessors self +end + +class DefaultConstraintsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + DEFAULTCONSTRAINTS = 1 + + FIELDS = { + DEFAULTCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'defaultConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLDefaultConstraint}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field defaultConstraints is unset!') unless @defaultConstraints + end + + ::Thrift::Struct.generate_accessors self +end + class DropConstraintRequest include ::Thrift::Struct, ::Thrift::Struct_Union DBNAME = 1 @@ -1648,6 +1715,23 @@ class AddNotNullConstraintRequest ::Thrift::Struct.generate_accessors self end +class AddDefaultConstraintRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DEFAULTCONSTRAINTCOLS = 1 + + FIELDS = { + DEFAULTCONSTRAINTCOLS => {:type => ::Thrift::Types::LIST, :name => 'defaultConstraintCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLDefaultConstraint}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field defaultConstraintCols is unset!') unless @defaultConstraintCols + end + + ::Thrift::Struct.generate_accessors self +end + class PartitionsByExprResult include ::Thrift::Struct, ::Thrift::Struct_Union PARTITIONS = 1 diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 167bda29ac..b0e84d9d25 100644 --- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -318,13 +318,13 @@ module ThriftHiveMetastore return end - def create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints) - send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints) + def create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints) + send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints) recv_create_table_with_constraints() end - def send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints) - send_message('create_table_with_constraints', Create_table_with_constraints_args, :tbl => tbl, :primaryKeys => primaryKeys, :foreignKeys => foreignKeys, :uniqueConstraints => uniqueConstraints, :notNullConstraints => notNullConstraints) + def send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints) + send_message('create_table_with_constraints', Create_table_with_constraints_args, :tbl => tbl, :primaryKeys => primaryKeys, :foreignKeys => foreignKeys, :uniqueConstraints => uniqueConstraints, :notNullConstraints => notNullConstraints, :defaultConstraints => defaultConstraints) end def recv_create_table_with_constraints() @@ -416,6 +416,22 @@ module ThriftHiveMetastore return end + def add_default_constraint(req) + send_add_default_constraint(req) + recv_add_default_constraint() + end + + def send_add_default_constraint(req) + 
send_message('add_default_constraint', Add_default_constraint_args, :req => req) + end + + def recv_add_default_constraint() + result = receive_message(Add_default_constraint_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + return + end + def drop_table(dbname, name, deleteData) send_drop_table(dbname, name, deleteData) recv_drop_table() @@ -1622,6 +1638,23 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_not_null_constraints failed: unknown result') end + def get_default_constraints(request) + send_get_default_constraints(request) + return recv_get_default_constraints() + end + + def send_get_default_constraints(request) + send_message('get_default_constraints', Get_default_constraints_args, :request => request) + end + + def recv_get_default_constraints() + result = receive_message(Get_default_constraints_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_default_constraints failed: unknown result') + end + def update_table_column_statistics(stats_obj) send_update_table_column_statistics(stats_obj) return recv_update_table_column_statistics() @@ -3338,7 +3371,7 @@ module ThriftHiveMetastore args = read_args(iprot, Create_table_with_constraints_args) result = Create_table_with_constraints_result.new() begin - @handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints) + @handler.create_table_with_constraints(args.tbl, args.primaryKeys, args.foreignKeys, args.uniqueConstraints, args.notNullConstraints, args.defaultConstraints) rescue ::AlreadyExistsException => o1 result.o1 = o1 rescue ::InvalidObjectException => o2 @@ -3416,6 +3449,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'add_not_null_constraint', seqid) end + def process_add_default_constraint(seqid, iprot, oprot) + args = read_args(iprot, Add_default_constraint_args) + result = Add_default_constraint_result.new() + begin + @handler.add_default_constraint(args.req) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'add_default_constraint', seqid) + end + def process_drop_table(seqid, iprot, oprot) args = read_args(iprot, Drop_table_args) result = Drop_table_result.new() @@ -4363,6 +4409,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_not_null_constraints', seqid) end + def process_get_default_constraints(seqid, iprot, oprot) + args = read_args(iprot, Get_default_constraints_args) + result = Get_default_constraints_result.new() + begin + result.success = @handler.get_default_constraints(args.request) + rescue ::MetaException => o1 + result.o1 = o1 + rescue ::NoSuchObjectException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_default_constraints', seqid) + end + def process_update_table_column_statistics(seqid, iprot, oprot) args = read_args(iprot, Update_table_column_statistics_args) result = Update_table_column_statistics_result.new() @@ -6074,13 +6133,15 @@ module ThriftHiveMetastore FOREIGNKEYS = 3 UNIQUECONSTRAINTS = 4 NOTNULLCONSTRAINTS = 5 + DEFAULTCONSTRAINTS = 6 FIELDS = { TBL => {:type => ::Thrift::Types::STRUCT, :name => 'tbl', :class => ::Table}, PRIMARYKEYS => {:type => ::Thrift::Types::LIST, :name => 'primaryKeys', 
:element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLPrimaryKey}}, FOREIGNKEYS => {:type => ::Thrift::Types::LIST, :name => 'foreignKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLForeignKey}}, UNIQUECONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'uniqueConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLUniqueConstraint}}, - NOTNULLCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'notNullConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLNotNullConstraint}} + NOTNULLCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'notNullConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLNotNullConstraint}}, + DEFAULTCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'defaultConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLDefaultConstraint}} } def struct_fields; FIELDS; end @@ -6283,6 +6344,40 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Add_default_constraint_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::AddDefaultConstraintRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Add_default_constraint_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Drop_table_args include ::Thrift::Struct, ::Thrift::Struct_Union DBNAME = 1 @@ -9109,6 +9204,42 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_default_constraints_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::DefaultConstraintsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_default_constraints_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::DefaultConstraintsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Update_table_column_statistics_args include ::Thrift::Struct, ::Thrift::Struct_Union STATS_OBJ = 1 diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index ac71d0882f..1c422ca281 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -1409,13 +1409,13 @@ private void create_table_core(final RawStore ms, final Table tbl, final EnvironmentContext envContext) throws AlreadyExistsException, MetaException, InvalidObjectException, 
NoSuchObjectException { - create_table_core(ms, tbl, envContext, null, null, null, null); + create_table_core(ms, tbl, envContext, null, null, null, null, null); } private void create_table_core(final RawStore ms, final Table tbl, final EnvironmentContext envContext, List primaryKeys, List foreignKeys, List uniqueConstraints, - List notNullConstraints) + List notNullConstraints, List defaultConstraints) throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException { if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) { @@ -1502,12 +1502,12 @@ private void create_table_core(final RawStore ms, final Table tbl, } if (primaryKeys == null && foreignKeys == null - && uniqueConstraints == null && notNullConstraints == null) { + && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null) { ms.createTable(tbl); } else { // Set constraint name if null before sending to listener List constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); int primaryKeySize = 0; if (primaryKeys != null) { primaryKeySize = primaryKeys.size(); @@ -1535,6 +1535,7 @@ private void create_table_core(final RawStore ms, final Table tbl, } } } + int notNullConstraintSize = notNullConstraints != null ? notNullConstraints.size() : 0; if (notNullConstraints != null) { for (int i = 0; i < notNullConstraints.size(); i++) { if (notNullConstraints.get(i).getNn_name() == null) { @@ -1542,6 +1543,14 @@ private void create_table_core(final RawStore ms, final Table tbl, } } } + if (defaultConstraints != null) { + for (int i = 0; i < defaultConstraints.size(); i++) { + if (defaultConstraints.get(i).getDc_name() == null) { + defaultConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize + + uniqueConstraintSize + notNullConstraintSize + i)); + } + } + } } if (!transactionalListeners.isEmpty()) { @@ -1636,14 +1645,15 @@ public void create_table_with_environment_context(final Table tbl, public void create_table_with_constraints(final Table tbl, final List primaryKeys, final List foreignKeys, List uniqueConstraints, - List notNullConstraints) + List notNullConstraints, + List defaultConstraints) throws AlreadyExistsException, MetaException, InvalidObjectException { startFunction("create_table", ": " + tbl.toString()); boolean success = false; Exception ex = null; try { create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); success = true; } catch (NoSuchObjectException e) { ex = e; @@ -1917,6 +1927,59 @@ public void add_not_null_constraint(AddNotNullConstraintRequest req) } }
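+ // Persists the supplied default constraints in a single RawStore transaction, + // back-filling generated names where dc_name was left null; event-listener + // wiring is still a TODO (see add_not_null_constraint above for the pattern).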
+ @Override + public void add_default_constraint(AddDefaultConstraintRequest req) + throws MetaException, InvalidObjectException { + List defaultConstraintCols = req.getDefaultConstraintCols(); + String constraintName = (defaultConstraintCols != null && defaultConstraintCols.size() > 0) ? + defaultConstraintCols.get(0).getDc_name() : "null"; + startFunction("add_default_constraint", ": " + constraintName); + boolean success = false; + Exception ex = null; + RawStore ms = getMS(); + try { + ms.openTransaction(); + List constraintNames = ms.addDefaultConstraints(defaultConstraintCols); + // Set default constraint name if null before sending to listener + if (defaultConstraintCols != null) { + for (int i = 0; i < defaultConstraintCols.size(); i++) { + if (defaultConstraintCols.get(i).getDc_name() == null) { + defaultConstraintCols.get(i).setDc_name(constraintNames.get(i)); + } + } + } + if (transactionalListeners.size() > 0) { + if (defaultConstraintCols != null && defaultConstraintCols.size() > 0) { + //TODO: Event listener for default + //AddDefaultConstraintEvent addDefaultConstraintEvent = new AddDefaultConstraintEvent(defaultConstraintCols, true, this); + //for (MetaStoreEventListener transactionalListener : transactionalListeners) { + // transactionalListener.onAddDefaultConstraint(addDefaultConstraintEvent); + //} + } + } + success = ms.commitTransaction(); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (defaultConstraintCols != null && defaultConstraintCols.size() > 0) { + for (MetaStoreEventListener listener : listeners) { + //AddDefaultConstraintEvent addDefaultConstraintEvent = new AddDefaultConstraintEvent(defaultConstraintCols, true, this); + //listener.onAddDefaultConstraint(addDefaultConstraintEvent); + } + } + endFunction("add_default_constraint", success, ex, constraintName); + } + } + private boolean is_table_exists(RawStore ms, String dbname, String name) throws MetaException { return (ms.getTable(dbname, name) != null); @@ -7327,6 +7390,28 @@ public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsReq return new NotNullConstraintsResponse(ret); } + @Override + public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request) + throws TException { + String db_name = request.getDb_name(); + String tbl_name = request.getTbl_name(); + startTableFunction("get_default_constraints", db_name, tbl_name); + List ret = null; + Exception ex = null; + try { + ret = getMS().getDefaultConstraints(db_name, tbl_name); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_default_constraints", ret != null, ex, tbl_name); + } + return new DefaultConstraintsResponse(ret); + } @Override public String get_metastore_db_uuid() throws TException { try {
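(Usage sketch, not part of the patch: with the server-side handler above in place, a caller holding a connected IMetaStoreClient could exercise the new API roughly as below. The database, table, and column names are hypothetical; the constructor argument order follows the SQLDefaultConstraint thrift definition: table_db, table_name, column_name, default_value, dc_name, enable_cstr, validate_cstr, rely_cstr.

    SQLDefaultConstraint dc = new SQLDefaultConstraint(
        "default",      // table_db (hypothetical)
        "web_logs",     // table_name (hypothetical)
        "status",       // column_name (hypothetical)
        "'active'",     // default_value
        null,           // dc_name: null lets the metastore generate a name
        true,           // enable_cstr
        false,          // validate_cstr
        false);         // rely_cstr
    client.addDefaultConstraint(java.util.Collections.singletonList(dc));
    List<SQLDefaultConstraint> fetched =
        client.getDefaultConstraints(new DefaultConstraintsRequest("default", "web_logs"));

The client-side plumbing for these two calls follows.)

diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 59c0cff1c2..da0d329fd3 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -801,7 +801,8 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws Already public void createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, - List notNullConstraints) + List notNullConstraints, + List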
defaultConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { HiveMetaHook hook = getHook(tbl); @@ -812,7 +813,7 @@ public void createTableWithConstraints(Table tbl, try { // Subclasses can override this step (for example, for temporary tables) client.create_table_with_constraints(tbl, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints); if (hook != null) { hook.commitCreateTable(tbl); } @@ -854,6 +855,12 @@ public void addNotNullConstraint(List notNullConstraintCol client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols)); } + @Override + public void addDefaultConstraint(List defaultConstraints) throws + NoSuchObjectException, MetaException, TException { + client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints)); + } + /** * @param type * @return true or false @@ -1763,6 +1770,11 @@ public Index getIndex(String dbName, String tblName, String indexName) return client.get_not_null_constraints(req).getNotNullConstraints(); } + @Override + public List getDefaultConstraints(DefaultConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_default_constraints(req).getDefaultConstraints(); + } /** {@inheritDoc} */ @Override diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 43aeeb3212..037331f9f3 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.DataOperationType; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -90,6 +91,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -1811,11 +1813,15 @@ boolean cacheFileMetadata(String dbName, String tableName, String partName, List getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException, NoSuchObjectException, TException; + List getDefaultConstraints(DefaultConstraintsRequest request) throws MetaException, + NoSuchObjectException, TException; + void createTableWithConstraints( org.apache.hadoop.hive.metastore.api.Table tTbl, List primaryKeys, List foreignKeys, List uniqueConstraints, - List notNullConstraints) + List notNullConstraints, + List defaultConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; void dropConstraint(String dbName, String tableName, String constraintName) throws @@ -1833,6 +1839,9 @@ void addUniqueConstraint(List uniqueConstraintCols) throws void 
addNotNullConstraint(List notNullConstraintCols) throws MetaException, NoSuchObjectException, TException; + void addDefaultConstraint(List defaultConstraints) throws + MetaException, NoSuchObjectException, TException; + /** * Gets the unique id of the backing database instance used for storing metadata * @return unique id of the backing database instance diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 46412b0e9b..6ead20aeaf 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -2282,4 +2283,64 @@ public void closeAllQueries() { return ret; } + public List getDefaultConstraints(String db_name, String tbl_name) + throws MetaException { + List ret = new ArrayList(); + String queryText = + "SELECT " + DBS + ".\"NAME\", " + TBLS + ".\"TBL_NAME\"," + + "CASE WHEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" IS NOT NULL THEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" " + + "ELSE " + PARTITION_KEYS + ".\"PKEY_NAME\" END, " + + "" + KEY_CONSTRAINTS + ".\"CONSTRAINT_NAME\", " + KEY_CONSTRAINTS + ".\"ENABLE_VALIDATE_RELY\", " + + "" + KEY_CONSTRAINTS + ".\"DEFAULT_VALUE\" " + + " from " + TBLS + " " + + " INNER JOIN " + KEY_CONSTRAINTS + " ON " + TBLS + ".\"TBL_ID\" = " + KEY_CONSTRAINTS + ".\"PARENT_TBL_ID\" " + + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + + " LEFT OUTER JOIN " + COLUMNS_V2 + " ON " + COLUMNS_V2 + ".\"CD_ID\" = " + KEY_CONSTRAINTS + ".\"PARENT_CD_ID\" AND " + + " " + COLUMNS_V2 + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.DEFAULT_CONSTRAINT+ " AND" + + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? 
") ; + + queryText = queryText.trim(); + if (queryText.endsWith("AND")) { + queryText = queryText.substring(0, queryText.length()-3); + } + if (LOG.isDebugEnabled()){ + LOG.debug("getDefaultConstraints: directsql : " + queryText); + } + List pms = new ArrayList(); + if (db_name != null) { + pms.add(db_name); + } + if (tbl_name != null) { + pms.add(tbl_name); + } + + Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); + List sqlResult = ensureList(executeWithArray( + queryParams, pms.toArray(), queryText)); + + if (!sqlResult.isEmpty()) { + for (Object[] line : sqlResult) { + int enableValidateRely = extractSqlInt(line[4]); + boolean enable = (enableValidateRely & 4) != 0; + boolean validate = (enableValidateRely & 2) != 0; + boolean rely = (enableValidateRely & 1) != 0; + SQLDefaultConstraint currConstraint = new SQLDefaultConstraint( + extractSqlString(line[0]), + extractSqlString(line[1]), + extractSqlString(line[2]), + extractSqlString(line[5]), + extractSqlString(line[3]), + enable, + validate, + rely); + ret.add(currConstraint); + } + } + return ret; + } + } diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 7b44df4128..11667b74af 100644 --- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -117,6 +117,7 @@ import org.apache.hadoop.hive.metastore.api.ResourceUri; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; @@ -1092,8 +1093,8 @@ public boolean dropType(String typeName) { @Override public List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, - List uniqueConstraints, - List notNullConstraints) + List uniqueConstraints, List notNullConstraints, + List defaultConstraints) throws InvalidObjectException, MetaException { boolean success = false; try { @@ -1106,6 +1107,7 @@ public boolean dropType(String typeName) { constraintNames.addAll(addPrimaryKeys(primaryKeys, false)); constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false)); constraintNames.addAll(addNotNullConstraints(notNullConstraints, false)); + constraintNames.addAll(addDefaultConstraints(defaultConstraints, false)); success = commitTransaction(); return constraintNames; } finally { @@ -4245,6 +4247,9 @@ private String getGuidFromDB() throws MetaException { } } else { currentConstraintName = normalizeIdentifier(foreignKey.getFk_name()); + if(constraintNameAlreadyExists(currentConstraintName)) { + throw new InvalidObjectException("Constraint name already exists: " + currentConstraintName); + } } fkNames.add(currentConstraintName); Integer updateRule = foreignKey.getUpdate_rule(); @@ -4396,6 +4401,9 @@ private static String generateColNameTypeSignature(String colName, String colTyp } } else { constraintName = normalizeIdentifier(pks.get(i).getPk_name()); + if(constraintNameAlreadyExists(constraintName)) { + throw new InvalidObjectException("Constraint name already exists: " + constraintName); + } } pkNames.add(constraintName); int enableValidateRely = (pks.get(i).isEnable_cstr() ? 
4 : 0) + @@ -4461,6 +4469,9 @@ private static String generateColNameTypeSignature(String colName, String colTyp } } else { constraintName = normalizeIdentifier(uks.get(i).getUk_name()); + if(constraintNameAlreadyExists(constraintName)) { + throw new InvalidObjectException("Constraint name already exists: " + constraintName); + } } ukNames.add(constraintName); @@ -4491,6 +4502,75 @@ private static String generateColNameTypeSignature(String colName, String colTyp return addNotNullConstraints(nns, true); } + @Override + public List addDefaultConstraints(List nns) + throws InvalidObjectException, MetaException { + return addDefaultConstraints(nns, true); + } + + private List addDefaultConstraints(List nns, boolean retrieveCD) + throws InvalidObjectException, MetaException { + List nnNames = new ArrayList<>(); + List cstrs = new ArrayList<>(); + String constraintName = null; + + for (int i = 0; i < nns.size(); i++) { + final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); + final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); + final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); + + // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. + // For instance, this is the case when we are creating the table. + AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + MTable parentTable = nParentTable.mtbl; + if (parentTable == null) { + throw new InvalidObjectException("Parent table not found: " + tableName); + } + + MColumnDescriptor parentCD = retrieveCD ? nParentTable.mcd : parentTable.getSd().getCD(); + int parentIntegerIndex = getColumnIndexFromTableColumns(parentCD == null ? null : parentCD.getCols(), columnName); + if (parentIntegerIndex == -1) { + if (parentTable.getPartitionKeys() != null) { + parentCD = null; + parentIntegerIndex = getColumnIndexFromTableColumns(parentTable.getPartitionKeys(), columnName); + } + if (parentIntegerIndex == -1) { + throw new InvalidObjectException("Parent column not found: " + columnName); + } + } + if (nns.get(i).getDc_name() == null) { + constraintName = generateConstraintName(tableDB, tableName, columnName, "dc"); + } else { + constraintName = normalizeIdentifier(nns.get(i).getDc_name()); + if(constraintNameAlreadyExists(constraintName)) { + throw new InvalidObjectException("Constraint name already exists: " + constraintName); + } + } + nnNames.add(constraintName); + + int enableValidateRely = (nns.get(i).isEnable_cstr() ? 4 : 0) + + (nns.get(i).isValidate_cstr() ? 2 : 0) + (nns.get(i).isRely_cstr() ? 
@@ -4525,6 +4605,9 @@ private static String generateColNameTypeSignature(String colName, String colTyp
         constraintName = generateConstraintName(tableDB, tableName, columnName, "nn");
       } else {
         constraintName = normalizeIdentifier(nns.get(i).getNn_name());
+        if (constraintNameAlreadyExists(constraintName)) {
+          throw new InvalidObjectException("Constraint name already exists: " + constraintName);
+        }
       }
       nnNames.add(constraintName);
@@ -9555,37 +9638,107 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro
     }
   }

-  protected List<SQLNotNullConstraint> getNotNullConstraintsInternal(final String db_name_input,
+  @Override
+  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+      throws MetaException {
+    try {
+      return getDefaultConstraintsInternal(db_name, tbl_name, true, true);
+    } catch (NoSuchObjectException e) {
+      throw new MetaException(ExceptionUtils.getStackTrace(e));
+    }
+  }
+
+  protected List<SQLDefaultConstraint> getDefaultConstraintsInternal(final String db_name_input,
     final String tbl_name_input, boolean allowSql, boolean allowJdo)
     throws MetaException, NoSuchObjectException {
     final String db_name = normalizeIdentifier(db_name_input);
     final String tbl_name = normalizeIdentifier(tbl_name_input);
+    return new GetListHelper<SQLDefaultConstraint>(db_name, tbl_name, allowSql, allowJdo) {
+
+      @Override
+      protected List<SQLDefaultConstraint> getSqlResult(GetHelper<List<SQLDefaultConstraint>> ctx)
+          throws MetaException {
+        return directSql.getDefaultConstraints(db_name, tbl_name);
+      }
+
+      @Override
+      protected List<SQLDefaultConstraint> getJdoResult(GetHelper<List<SQLDefaultConstraint>> ctx)
+          throws MetaException, NoSuchObjectException {
+        return getDefaultConstraintsViaJdo(db_name, tbl_name);
+      }
+    }.run(false);
+  }
+
+  private List<SQLDefaultConstraint> getDefaultConstraintsViaJdo(String db_name, String tbl_name)
+      throws MetaException {
+    boolean commited = false;
+    List<SQLDefaultConstraint> defaultConstraints = null;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MConstraint.class,
+          "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&"
+          + " constraintType == MConstraint.DEFAULT_CONSTRAINT");
+      query.declareParameters("java.lang.String tbl_name, java.lang.String db_name");
+      Collection<?> constraints = (Collection<?>) query.execute(tbl_name, db_name);
+      pm.retrieveAll(constraints);
+      defaultConstraints = new ArrayList<>();
+      for (Iterator<?> i = constraints.iterator(); i.hasNext();) {
+        MConstraint currConstraint = (MConstraint) i.next();
+        List<MFieldSchema> cols = currConstraint.getParentColumn() != null ?
+            currConstraint.getParentColumn().getCols() : currConstraint.getParentTable().getPartitionKeys();
+        int enableValidateRely = currConstraint.getEnableValidateRely();
+        boolean enable = (enableValidateRely & 4) != 0;
+        boolean validate = (enableValidateRely & 2) != 0;
+        boolean rely = (enableValidateRely & 1) != 0;
+        defaultConstraints.add(new SQLDefaultConstraint(db_name,
+            tbl_name,
+            cols.get(currConstraint.getParentIntegerIndex()).getName(),
+            currConstraint.getDefaultValue(), currConstraint.getConstraintName(), enable, validate, rely));
+      }
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
+    }
+    return defaultConstraints;
+  }
+
+  protected List<SQLNotNullConstraint> getNotNullConstraintsInternal(final String db_name_input,
+    final String tbl_name_input, boolean allowSql, boolean allowJdo)
+    throws MetaException, NoSuchObjectException {
+    final String db_name = normalizeIdentifier(db_name_input);
+    final String tbl_name = normalizeIdentifier(tbl_name_input);
     return new GetListHelper<SQLNotNullConstraint>(db_name, tbl_name, allowSql, allowJdo) {

       @Override
       protected List<SQLNotNullConstraint> getSqlResult(GetHelper<List<SQLNotNullConstraint>> ctx)
-        throws MetaException {
+          throws MetaException {
         return directSql.getNotNullConstraints(db_name, tbl_name);
       }

       @Override
       protected List<SQLNotNullConstraint> getJdoResult(GetHelper<List<SQLNotNullConstraint>> ctx)
-        throws MetaException, NoSuchObjectException {
+          throws MetaException, NoSuchObjectException {
         return getNotNullConstraintsViaJdo(db_name, tbl_name);
       }
     }.run(false);
   }

   private List<SQLNotNullConstraint> getNotNullConstraintsViaJdo(String db_name, String tbl_name)
-    throws MetaException {
+      throws MetaException {
     boolean commited = false;
     List<SQLNotNullConstraint> notNullConstraints = null;
     Query query = null;
     try {
       openTransaction();
       query = pm.newQuery(MConstraint.class,
-        "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&"
-        + " constraintType == MConstraint.NOT_NULL_CONSTRAINT");
+          "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&"
+          + " constraintType == MConstraint.NOT_NULL_CONSTRAINT");
       query.declareParameters("java.lang.String tbl_name, java.lang.String db_name");
       Collection<?> constraints = (Collection<?>) query.execute(tbl_name, db_name);
       pm.retrieveAll(constraints);
@@ -9599,9 +9752,9 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro
         boolean validate = (enableValidateRely & 2) != 0;
         boolean rely = (enableValidateRely & 1) != 0;
         notNullConstraints.add(new SQLNotNullConstraint(db_name,
-          tbl_name,
-          cols.get(currConstraint.getParentIntegerIndex()).getName(),
-          currConstraint.getConstraintName(), enable, validate, rely));
+            tbl_name,
+            cols.get(currConstraint.getParentIntegerIndex()).getName(),
+            currConstraint.getConstraintName(), enable, validate, rely));
       }
       commited = commitTransaction();
     } finally {
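The getDefaultConstraintsInternal helper above follows ObjectStore's usual pattern: GetListHelper tries the direct-SQL path first and falls back to JDO when permitted. A minimal standalone sketch of that control flow, with illustrative names rather than the real helper classes:

import java.util.List;
import java.util.concurrent.Callable;

// Illustrative shape of the try-direct-SQL-then-JDO fallback; not the real GetListHelper.
final class FallbackReader<T> {
  List<T> read(boolean allowSql, boolean allowJdo,
      Callable<List<T>> sqlPath, Callable<List<T>> jdoPath) throws Exception {
    if (allowSql) {
      try {
        return sqlPath.call();   // e.g. directSql.getDefaultConstraints(db, tbl)
      } catch (Exception e) {
        if (!allowJdo) {
          throw e;               // no JDO fallback permitted
        }
      }
    }
    return jdoPath.call();       // e.g. getDefaultConstraintsViaJdo(db, tbl)
  }
}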
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index f500d63725..6a8036f71b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -740,9 +741,13 @@ void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[]
   List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
     throws MetaException;

+  List<SQLDefaultConstraint> getDefaultConstraints(String db_name,
+    String tbl_name) throws MetaException;
+
   List<String> createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
     List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
-    List<SQLNotNullConstraint> notNullConstraints) throws InvalidObjectException, MetaException;
+    List<SQLNotNullConstraint> notNullConstraints,
+    List<SQLDefaultConstraint> defaultConstraints) throws InvalidObjectException, MetaException;

   void dropConstraint(String dbName, String tableName, String constraintName)
     throws NoSuchObjectException;
@@ -754,6 +759,8 @@ void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[]
   List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns) throws InvalidObjectException, MetaException;

+  List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns) throws InvalidObjectException, MetaException;
+
   /**
    * Gets the unique id of the backing datastore for the metadata
    * @return
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 0d132f2074..2ee5433703 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -2423,13 +2424,21 @@ public int getDatabaseCount() throws MetaException {
     return rawStore.getNotNullConstraints(db_name, tbl_name);
   }

+  @Override
+  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+      throws MetaException {
+    // TODO constraintCache
+    return rawStore.getDefaultConstraints(db_name, tbl_name);
+  }
+
   @Override
   public List<String> createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
     List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
-    List<SQLNotNullConstraint> notNullConstraints) throws InvalidObjectException, MetaException {
+    List<SQLNotNullConstraint> notNullConstraints,
+    List<SQLDefaultConstraint> defaultConstraints) throws InvalidObjectException, MetaException {
     // TODO constraintCache
     List<String> constraintNames = rawStore.createTableWithConstraints(tbl, primaryKeys,
-      foreignKeys, uniqueConstraints, notNullConstraints);
+      foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints);
     String dbName = StringUtils.normalizeIdentifier(tbl.getDbName());
     String tblName = StringUtils.normalizeIdentifier(tbl.getTableName());
     if (!shouldCacheTable(dbName, tblName)) {
@@ -2477,6 +2486,13 @@ public void dropConstraint(String dbName, String tableName,
     return rawStore.addNotNullConstraints(nns);
   }

+  @Override
+  public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+      throws InvalidObjectException, MetaException {
+    // TODO constraintCache
+    return rawStore.addDefaultConstraints(nns);
+  }
+
   @Override
   public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String dbName)
     throws MetaException, NoSuchObjectException {
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MConstraint.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MConstraint.java
index 865a9eed87..8c7f57fdc1 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MConstraint.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MConstraint.java
@@ -33,6 +33,7 @@
   Integer childIntegerIndex;
   Integer parentIntegerIndex;
   int enableValidateRely;
+  String defaultValue;

 // 0 - Primary Key
 // 1 - PK-FK relationship
@@ -42,6 +43,7 @@
   public final static int FOREIGN_KEY_CONSTRAINT = 1;
   public final static int UNIQUE_CONSTRAINT = 2;
   public final static int NOT_NULL_CONSTRAINT = 3;
+  public final static int DEFAULT_CONSTRAINT = 4;

   @SuppressWarnings("serial")
   public static class PK implements Serializable {
@@ -90,6 +92,29 @@ public MConstraint(String constraintName, int constraintType, int position, Inte
     this.parentIntegerIndex = parentIntegerIndex;
   }

+  public MConstraint(String constraintName, int constraintType, int position, Integer deleteRule, Integer updateRule,
+      int enableRelyValidate, MTable parentTable, MTable childTable, MColumnDescriptor parentColumn,
+      MColumnDescriptor childColumn, Integer childIntegerIndex, Integer parentIntegerIndex, String defaultValue) {
+    this.constraintName = constraintName;
+    this.constraintType = constraintType;
+    this.parentTable = parentTable;
+    this.childTable = childTable;
+    this.parentColumn = parentColumn;
+    this.childColumn = childColumn;
+    this.position = position;
+    this.deleteRule = deleteRule;
+    this.updateRule = updateRule;
+    this.enableValidateRely = enableRelyValidate;
+    this.childIntegerIndex = childIntegerIndex;
+    this.parentIntegerIndex = parentIntegerIndex;
+    this.defaultValue = defaultValue;
+  }
+
+  public String getDefaultValue() {
+    return defaultValue;
+  }
+
+  public void setDefaultValue(String defaultValue) {
+    this.defaultValue = defaultValue;
+  }

   public String getConstraintName() {
     return constraintName;
   }
@@ -139,7 +164,7 @@ public void setChildIntegerIndex(Integer childIntegerIndex) {
   }

   public Integer getParentIntegerIndex() {
-    return childIntegerIndex;
+    return parentIntegerIndex;
   }

   public void setParentIntegerIndex(Integer parentIntegerIndex) {
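Beyond the new field, constant, and constructor, note that the last hunk fixes a latent copy-paste bug: getParentIntegerIndex() previously returned childIntegerIndex. A quick standalone check of the 13-argument constructor and the corrected getter; the constraint name and column index here are made up:

import org.apache.hadoop.hive.metastore.model.MConstraint;

class MConstraintCheck {
  public static void main(String[] args) {
    // Args: name, type, position, deleteRule, updateRule, enableValidateRely,
    // parentTable, childTable, parentColumn, childColumn,
    // childIntegerIndex, parentIntegerIndex, defaultValue.
    MConstraint mc = new MConstraint("users_active_dc", MConstraint.DEFAULT_CONSTRAINT,
        1, null, null, 4, null, null, null, null, null, 7, "true");
    System.out.println(mc.getParentIntegerIndex()); // 7; returned childIntegerIndex (null here) before the fix
    System.out.println(mc.getDefaultValue());       // true
  }
}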
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
index 9d8a703b33..86ef6b13ad 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -108,7 +108,7 @@ CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHA

 CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);

-CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
+CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL, "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, "DEFAULT_VALUE" VARCHAR(400));

 CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
diff --git a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
index a50c45d4a0..1fc34bcc0d 100644
--- a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -120,3 +120,7 @@ RENAME COLUMN COMPLETED_COMPACTIONS.CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_ID;
 -- Modify txn_components/completed_txn_components tables to add write id.
 ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID bigint;
 ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint;
+
+-- HIVE-18726
+-- add a new column to support default value for DEFAULT constraint
+ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD COLUMN "DEFAULT_VALUE" VARCHAR(400);
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
index 1b7d0da1cc..e0bb25b239 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -1105,7 +1105,8 @@ CREATE TABLE KEY_CONSTRAINTS
     CONSTRAINT_TYPE SMALLINT NOT NULL,
     UPDATE_RULE SMALLINT,
     DELETE_RULE SMALLINT,
-    ENABLE_VALIDATE_RELY SMALLINT NOT NULL
+    ENABLE_VALIDATE_RELY SMALLINT NOT NULL,
+    DEFAULT_VALUE VARCHAR(400)
 ) ;

 ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
index 8ab466d5e7..3a731612d2 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
@@ -174,3 +174,7 @@ EXEC SP_RENAME 'COMPLETED_COMPACTIONS.CC_HIGHEST_TXN_ID', 'CC_HIGHEST_WRITE_ID',
 -- Modify txn_components/completed_txn_components tables to add write id.
 ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID bigint;
 ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint;
+
+-- HIVE-18726
+-- add a new column to support default value for DEFAULT constraint
+-- (note: T-SQL uses ALTER TABLE ... ADD without the COLUMN keyword)
+ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400);
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
index 886c93262b..566badf2c7 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -859,6 +859,7 @@ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
   `UPDATE_RULE` SMALLINT(6),
   `DELETE_RULE` SMALLINT(6),
   `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+  `DEFAULT_VALUE` VARCHAR(400),
   PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
index a5377342aa..35f08dc5bf 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -159,3 +159,7 @@ ALTER TABLE COMPLETED_COMPACTIONS CHANGE `CC_HIGHEST_TXN_ID` `CC_HIGHEST_WRITE_I
 -- Modify txn_components/completed_txn_components tables to add write id.
 ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID bigint;
 ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint;
+
+-- HIVE-18726
+-- add a new column to support default value for DEFAULT constraint
+ALTER TABLE `KEY_CONSTRAINTS` ADD COLUMN `DEFAULT_VALUE` VARCHAR(400);
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
index 366b2d90a0..82811ee8d7 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -869,7 +869,8 @@ CREATE TABLE KEY_CONSTRAINTS
   CONSTRAINT_TYPE NUMBER NOT NULL,
   UPDATE_RULE NUMBER,
   DELETE_RULE NUMBER,
-  ENABLE_VALIDATE_RELY NUMBER NOT NULL
+  ENABLE_VALIDATE_RELY NUMBER NOT NULL,
+  DEFAULT_VALUE VARCHAR(400)
 ) ;

 ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index bd786fb03d..edf14d98cb 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -157,6 +157,7 @@ UPDATE DBS
   SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4)
 WHERE DB_LOCATION_URI LIKE 's3n://%' ;
+
 -- HIVE-18192
 CREATE TABLE TXN_TO_WRITE_ID (
   T2W_TXNID number(19) NOT NULL,
@@ -182,3 +183,7 @@ ALTER TABLE COMPLETED_COMPACTIONS RENAME COLUMN CC_HIGHEST_TXN_ID TO CC_HIGHEST_
 -- Modify txn_components/completed_txn_components tables to add write id.
 ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID number(19);
 ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID number(19);
+
+-- HIVE-18726
+-- add a new column to support default value for DEFAULT constraint
+ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400);
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
index 4abf24c96b..543a3586ca 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -624,6 +624,7 @@ CREATE TABLE "KEY_CONSTRAINTS"
   "UPDATE_RULE" SMALLINT,
   "DELETE_RULE" SMALLINT,
   "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+  "DEFAULT_VALUE" VARCHAR(400),
   PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
 ) ;
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
index 34ed9742fa..ed4ce22dfb 100644
--- a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
@@ -198,3 +198,7 @@ ALTER TABLE COMPLETED_COMPACTIONS RENAME CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_I
 -- Modify txn_components/completed_txn_components tables to add write id.
 ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID bigint;
 ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint;
+
+-- HIVE-18726
+-- add a new column to support default value for DEFAULT constraint
+ALTER TABLE "KEY_CONSTRAINTS" ADD COLUMN "DEFAULT_VALUE" VARCHAR(400);
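All five backing schemas now carry the same nullable DEFAULT_VALUE VARCHAR(400) column, so an ad-hoc query against the metastore database looks alike everywhere. A hedged JDBC sketch, Derby-flavored; the connection URL is illustrative:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

class KeyConstraintsProbe {
  public static void main(String[] args) throws Exception {
    // Illustrative Derby URL; point this at a real metastore database to run it.
    try (Connection c = DriverManager.getConnection("jdbc:derby:metastore_db");
         PreparedStatement ps = c.prepareStatement(
             "SELECT CONSTRAINT_NAME, DEFAULT_VALUE, ENABLE_VALIDATE_RELY"
                 + " FROM APP.KEY_CONSTRAINTS WHERE CONSTRAINT_TYPE = ?")) {
      ps.setInt(1, 4); // 4 = MConstraint.DEFAULT_CONSTRAINT
      try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
          System.out.printf("%s default=%s evr=%d%n",
              rs.getString(1), rs.getString(2), rs.getInt(3));
        }
      }
    }
  }
}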
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index b11ee380b4..e95014a82e 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -90,6 +90,17 @@ struct SQLNotNullConstraint {
   7: bool rely_cstr       // Rely/No Rely
 }

+struct SQLDefaultConstraint {
+  1: string table_db,      // table schema
+  2: string table_name,    // table name
+  3: string column_name,   // column name
+  4: string default_value, // default value
+  5: string dc_name,       // default constraint name
+  6: bool enable_cstr,     // Enable/Disable
+  7: bool validate_cstr,   // Validate/No validate
+  8: bool rely_cstr        // Rely/No Rely
+}
+
 struct Type {
   1: string name,             // one of the types in PrimitiveTypes or CollectionTypes or User defined types
   2: optional string type1,   // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
@@ -537,6 +548,16 @@ struct NotNullConstraintsResponse {
   1: required list<SQLNotNullConstraint> notNullConstraints
 }

+struct DefaultConstraintsRequest {
+  1: required string db_name,
+  2: required string tbl_name
+}
+
+struct DefaultConstraintsResponse {
+  1: required list<SQLDefaultConstraint> defaultConstraints
+}
+
+
 struct DropConstraintRequest {
   1: required string dbname,
   2: required string tablename,
@@ -559,6 +580,10 @@ struct AddNotNullConstraintRequest {
   1: required list<SQLNotNullConstraint> notNullConstraintCols
 }

+struct AddDefaultConstraintRequest {
+  1: required list<SQLDefaultConstraint> defaultConstraintCols
+}
+
 // Return type for get_partitions_by_expr
 struct PartitionsByExprResult {
   1: required list<Partition> partitions,
@@ -1419,7 +1444,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
               2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
   void create_table_with_constraints(1:Table tbl, 2: list<SQLPrimaryKey> primaryKeys, 3: list<SQLForeignKey> foreignKeys,
-  4: list<SQLUniqueConstraint> uniqueConstraints, 5: list<SQLNotNullConstraint> notNullConstraints)
+  4: list<SQLUniqueConstraint> uniqueConstraints, 5: list<SQLNotNullConstraint> notNullConstraints,
+  6: list<SQLDefaultConstraint> defaultConstraints)
       throws (1:AlreadyExistsException o1,
               2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
@@ -1433,6 +1459,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
       throws(1:NoSuchObjectException o1, 2:MetaException o2)
   void add_not_null_constraint(1:AddNotNullConstraintRequest req)
       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  void add_default_constraint(1:AddDefaultConstraintRequest req)
+      throws(1:NoSuchObjectException o1, 2:MetaException o2)

   // drops the table and all the partitions associated with it if the table has partitions
   // delete data (including partitions) if deleteData is set to true
@@ -1696,6 +1724,8 @@ service ThriftHiveMetastore extends fb303.FacebookService
       throws(1:MetaException o1, 2:NoSuchObjectException o2)
   NotNullConstraintsResponse get_not_null_constraints(1:NotNullConstraintsRequest request)
       throws(1:MetaException o1, 2:NoSuchObjectException o2)
+  DefaultConstraintsResponse get_default_constraints(1:DefaultConstraintsRequest request)
+      throws(1:MetaException o1, 2:NoSuchObjectException o2)

   // column statistics interfaces
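A sketch of driving the two new service methods from a Thrift client. The request/response classes follow from the generated code for the IDL above; the endpoint, transport setup, and constraint values are illustrative, not part of this patch:

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.AddDefaultConstraintRequest;
import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.DefaultConstraintsResponse;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

class DefaultConstraintClient {
  public static void main(String[] args) throws Exception {
    TSocket sock = new TSocket("localhost", 9083); // illustrative metastore endpoint
    sock.open();
    try {
      ThriftHiveMetastore.Client msc =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(sock));

      SQLDefaultConstraint dc = new SQLDefaultConstraint(
          "web", "users", "active", "true", "users_active_dc", true, false, false);
      msc.add_default_constraint(
          new AddDefaultConstraintRequest(Collections.singletonList(dc)));

      DefaultConstraintsResponse resp =
          msc.get_default_constraints(new DefaultConstraintsRequest("web", "users"));
      for (SQLDefaultConstraint c : resp.getDefaultConstraints()) {
        System.out.println(c.getDc_name() + " -> " + c.getDefault_value());
      }
    } finally {
      sock.close();
    }
  }
}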
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 75ea8c4a77..8c0cc266a3 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -904,11 +905,19 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
     return null;
   }

+  @Override
+  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+      throws MetaException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
   @Override
   public List<String> createTableWithConstraints(Table tbl,
     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
     List<SQLUniqueConstraint> uniqueConstraints,
-    List<SQLNotNullConstraint> notNullConstraints)
+    List<SQLNotNullConstraint> notNullConstraints,
+    List<SQLDefaultConstraint> defaultConstraints)
     throws InvalidObjectException, MetaException {
     // TODO Auto-generated method stub
     return null;
@@ -946,6 +955,13 @@ public void dropConstraint(String dbName, String tableName,
     return null;
   }

+  @Override
+  public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+      throws InvalidObjectException, MetaException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
   @Override
   public String getMetastoreDbUuid() throws MetaException {
     throw new MetaException("Get metastore uuid is not implemented");
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 207d842f94..a0b2f986e3 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
@@ -914,11 +915,19 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
     return null;
   }

+  @Override
+  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+      throws MetaException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
   @Override
   public List<String> createTableWithConstraints(Table tbl,
     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
     List<SQLUniqueConstraint> uniqueConstraints,
-    List<SQLNotNullConstraint> notNullConstraints)
+    List<SQLNotNullConstraint> notNullConstraints,
+    List<SQLDefaultConstraint> defaultConstraints)
     throws InvalidObjectException, MetaException {
     // TODO Auto-generated method stub
     return null;
@@ -957,6 +966,13 @@ public void dropConstraint(String dbName, String tableName,
     return null;
   }

+  @Override
+  public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+      throws InvalidObjectException, MetaException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
   @Override
   public String getMetastoreDbUuid() throws MetaException {
     throw new MetaException("Get metastore uuid is not implemented");