diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift
index a4fb612..3635054 100755
--- metastore/if/hive_metastore.thrift
+++ metastore/if/hive_metastore.thrift
@@ -227,8 +227,7 @@ struct SerDeInfo {
// sort order of a column (column name along with asc(1)/desc(0))
struct Order {
1: string col, // sort column name
- 2: i32 order, // asc(1) or desc(0)
- 3: i32 nullOrder // nulls first(0) or nulls last(1)
+ 2: i32 order // asc(1) or desc(0)
}
// this object holds all the information about skewed table
diff --git metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
deleted file mode 100644
index a8b48bf..0000000
--- metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE "APP".SORT_COLS ADD "NULL_ORDER" INTEGER NOT NULL DEFAULT 0;
-UPDATE "APP".SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;
diff --git metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
index 00c49ae..42f4eb6 100644
--- metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
+++ metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
@@ -28,7 +28,7 @@ CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCH
CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(1000) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "NULL_ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
diff --git metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
index 13aeabb..a0bac3c 100644
--- metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
+++ metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
@@ -1,5 +1,4 @@
-- Upgrade MetaStore schema from 2.0.0 to 2.1.0
RUN '033-HIVE-12892.derby.sql';
-RUN '034-HIVE-12994.derby.sql';
UPDATE "APP".VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
diff --git metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql
deleted file mode 100644
index 51e2c43..0000000
--- metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE SORT_COLS ADD "NULL_ORDER" int NOT NULL DEFAULT 0;
-UPDATE SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;
diff --git metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
index c98c7ef..cf5a662 100644
--- metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
+++ metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
@@ -411,7 +411,6 @@ CREATE TABLE SORT_COLS
SD_ID bigint NOT NULL,
"COLUMN_NAME" nvarchar(1000) NULL,
"ORDER" int NOT NULL,
- "NULL_ORDER" int NOT NULL,
INTEGER_IDX int NOT NULL
);
diff --git metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
index 3b4a2a3..f25daf2 100644
--- metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
+++ metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
@@ -1,7 +1,6 @@
SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE;
:r 018-HIVE-12892.mssql.sql;
-:r 019-HIVE-12994.mssql.sql;
UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE;
diff --git metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql
deleted file mode 100644
index ff4cf37..0000000
--- metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE `SORT_COLS` ADD `NULL_ORDER` INTEGER NOT NULL DEFAULT 0;
-UPDATE `SORT_COLS` SET `NULL_ORDER` = 1 WHERE `ORDER` = 0;
diff --git metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
index 1369349..6fd3209 100644
--- metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
+++ metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
@@ -546,7 +546,6 @@ CREATE TABLE IF NOT EXISTS `SORT_COLS` (
`SD_ID` bigint(20) NOT NULL,
`COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`ORDER` int(11) NOT NULL,
- `NULL_ORDER` int(11) NOT NULL,
`INTEGER_IDX` int(11) NOT NULL,
PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
KEY `SORT_COLS_N49` (`SD_ID`),
diff --git metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
index 09bb8b8..e790636 100644
--- metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
+++ metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
@@ -1,7 +1,6 @@
SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' ';
SOURCE 033-HIVE-12892.mysql.sql;
-SOURCE 034-HIVE-12994.mysql.sql;
UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' ';
diff --git metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql
deleted file mode 100644
index 0b57ef8..0000000
--- metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE SORT_COLS ADD "NULL_ORDER" NUMBER (10) NOT NULL DEFAULT 0;
-UPDATE SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;
diff --git metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
index 198f0bd..774f6be 100644
--- metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
+++ metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
@@ -274,7 +274,6 @@ CREATE TABLE SORT_COLS
SD_ID NUMBER NOT NULL,
"COLUMN_NAME" VARCHAR2(1000) NULL,
"ORDER" NUMBER (10) NOT NULL,
- "NULL_ORDER" NUMBER (10) NOT NULL,
INTEGER_IDX NUMBER(10) NOT NULL
);
diff --git metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
index a729baa..8368d08 100644
--- metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
+++ metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
@@ -1,7 +1,6 @@
SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual;
@033-HIVE-12892.oracle.sql;
-@034-HIVE-12994.oracle.sql;
UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual;
diff --git metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql
deleted file mode 100644
index 0894fed..0000000
--- metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE SORT_COLS ADD COLUMN "NULL_ORDER" bigint NOT NULL DEFAULT 0;
-UPDATE SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;
diff --git metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
index 48d16de..7463a37 100644
--- metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
+++ metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
@@ -342,7 +342,6 @@ CREATE TABLE "SORT_COLS" (
"SD_ID" bigint NOT NULL,
"COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
"ORDER" bigint NOT NULL,
- "NULL_ORDER" bigint NOT NULL,
"INTEGER_IDX" bigint NOT NULL
);
diff --git metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
index 30d070d..6172407 100644
--- metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
+++ metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
@@ -1,7 +1,6 @@
SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0';
\i 032-HIVE-12892.postgres.sql;
-\i 033-HIVE-12994.postgres.sql;
UPDATE "VERSION" SET "SCHEMA_VERSION"='2.1.0', "VERSION_COMMENT"='Hive release version 2.1.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0';
diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
index 3057fff..3b2d7b5 100644
--- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
+++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
@@ -22582,16 +22582,6 @@ public StorageDescriptor parsePartialFrom(
* optional sint32 order = 2 [default = 1];
*/
int getOrder();
-
- // optional sint32 nullOrder = 3 [default = 0];
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- boolean hasNullOrder();
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- int getNullOrder();
}
/**
* Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order}
@@ -22654,11 +22644,6 @@ private Order(
order_ = input.readSInt32();
break;
}
- case 24: {
- bitField0_ |= 0x00000004;
- nullOrder_ = input.readSInt32();
- break;
- }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -22758,26 +22743,9 @@ public int getOrder() {
return order_;
}
- // optional sint32 nullOrder = 3 [default = 0];
- public static final int NULLORDER_FIELD_NUMBER = 3;
- private int nullOrder_;
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- public boolean hasNullOrder() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- public int getNullOrder() {
- return nullOrder_;
- }
-
private void initFields() {
columnName_ = "";
order_ = 1;
- nullOrder_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -22801,9 +22769,6 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeSInt32(2, order_);
}
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeSInt32(3, nullOrder_);
- }
getUnknownFields().writeTo(output);
}
@@ -22821,10 +22786,6 @@ public int getSerializedSize() {
size += com.google.protobuf.CodedOutputStream
.computeSInt32Size(2, order_);
}
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- size += com.google.protobuf.CodedOutputStream
- .computeSInt32Size(3, nullOrder_);
- }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -22945,8 +22906,6 @@ public Builder clear() {
bitField0_ = (bitField0_ & ~0x00000001);
order_ = 1;
bitField0_ = (bitField0_ & ~0x00000002);
- nullOrder_ = 0;
- bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -22983,10 +22942,6 @@ public Builder clone() {
to_bitField0_ |= 0x00000002;
}
result.order_ = order_;
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
- to_bitField0_ |= 0x00000004;
- }
- result.nullOrder_ = nullOrder_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -23011,9 +22966,6 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr
if (other.hasOrder()) {
setOrder(other.getOrder());
}
- if (other.hasNullOrder()) {
- setNullOrder(other.getNullOrder());
- }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -23152,39 +23104,6 @@ public Builder clearOrder() {
return this;
}
- // optional sint32 nullOrder = 3 [default = 0];
- private int nullOrder_ ;
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- public boolean hasNullOrder() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- public int getNullOrder() {
- return nullOrder_;
- }
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- public Builder setNullOrder(int value) {
- bitField0_ |= 0x00000004;
- nullOrder_ = value;
- onChanged();
- return this;
- }
- /**
- * optional sint32 nullOrder = 3 [default = 0];
- */
- public Builder clearNullOrder() {
- bitField0_ = (bitField0_ & ~0x00000004);
- nullOrder_ = 0;
- onChanged();
- return this;
- }
-
// @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order)
}
@@ -34828,7 +34747,7 @@ public Builder removeRange(int index) {
"ant_info\030\001 \003(\01325.org.apache.hadoop.hive." +
"metastore.hbase.RoleGrantInfo\"\030\n\010RoleLis",
"t\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013create_time\030\001" +
- " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\302\010\n\021StorageDesc" +
+ " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021StorageDesc" +
"riptor\022A\n\004cols\030\001 \003(\01323.org.apache.hadoop" +
".hive.metastore.hbase.FieldSchema\022\024\n\014inp" +
"ut_format\030\002 \001(\t\022\025\n\routput_format\030\003 \001(\t\022\025" +
@@ -34841,51 +34760,51 @@ public Builder removeRange(int index) {
"skewed_info\030\t \001(\0132D.org.apache.hadoop.hi" +
"ve.metastore.hbase.StorageDescriptor.Ske" +
"wedInfo\022!\n\031stored_as_sub_directories\030\n \001" +
- "(\010\032D\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" +
- "r\030\002 \001(\021:\0011\022\024\n\tnullOrder\030\003 \001(\021:\0010\032|\n\tSerD" +
- "eInfo\022\014\n\004name\030\001 \001(\t\022\031\n\021serialization_lib" +
- "\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.org.apache." +
- "hadoop.hive.metastore.hbase.Parameters\032\214" +
- "\003\n\nSkewedInfo\022\030\n\020skewed_col_names\030\001 \003(\t\022",
- "r\n\021skewed_col_values\030\002 \003(\0132W.org.apache." +
- "hadoop.hive.metastore.hbase.StorageDescr" +
- "iptor.SkewedInfo.SkewedColValueList\022\206\001\n\036" +
- "skewed_col_value_location_maps\030\003 \003(\0132^.o" +
- "rg.apache.hadoop.hive.metastore.hbase.St" +
- "orageDescriptor.SkewedInfo.SkewedColValu" +
- "eLocationMap\032.\n\022SkewedColValueList\022\030\n\020sk" +
- "ewed_col_value\030\001 \003(\t\0327\n\031SkewedColValueLo" +
- "cationMap\022\013\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004" +
- "\n\005Table\022\r\n\005owner\030\001 \001(\t\022\023\n\013create_time\030\002 ",
- "\001(\003\022\030\n\020last_access_time\030\003 \001(\003\022\021\n\tretenti" +
- "on\030\004 \001(\003\022\020\n\010location\030\005 \001(\t\022I\n\rsd_paramet" +
- "ers\030\006 \001(\01322.org.apache.hadoop.hive.metas" +
- "tore.hbase.Parameters\022\017\n\007sd_hash\030\007 \002(\014\022K" +
- "\n\016partition_keys\030\010 \003(\01323.org.apache.hado" +
- "op.hive.metastore.hbase.FieldSchema\022F\n\np" +
- "arameters\030\t \001(\01322.org.apache.hadoop.hive" +
- ".metastore.hbase.Parameters\022\032\n\022view_orig" +
- "inal_text\030\n \001(\t\022\032\n\022view_expanded_text\030\013 " +
- "\001(\t\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivileges\030\r ",
- "\001(\0132=.org.apache.hadoop.hive.metastore.h" +
- "base.PrincipalPrivilegeSet\022\024\n\014is_tempora" +
- "ry\030\016 \001(\010\"\353\004\n\026PartitionKeyComparator\022\r\n\005n" +
- "ames\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G." +
- "org.apache.hadoop.hive.metastore.hbase.P" +
- "artitionKeyComparator.Operator\022S\n\005range\030" +
- "\004 \003(\0132D.org.apache.hadoop.hive.metastore" +
- ".hbase.PartitionKeyComparator.Range\032(\n\004M" +
- "ark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001" +
- "\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.or",
- "g.apache.hadoop.hive.metastore.hbase.Par" +
- "titionKeyComparator.Mark\022P\n\003end\030\003 \001(\0132C." +
- "org.apache.hadoop.hive.metastore.hbase.P" +
- "artitionKeyComparator.Mark\032\241\001\n\010Operator\022" +
- "Z\n\004type\030\001 \002(\0162L.org.apache.hadoop.hive.m" +
- "etastore.hbase.PartitionKeyComparator.Op" +
- "erator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n" +
- "\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*#\n\rPrinci" +
- "palType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001"
+ "(\010\032.\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" +
+ "r\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004name\030\001 \001(\t\022\031\n" +
+ "\021serialization_lib\030\002 \001(\t\022F\n\nparameters\030\003" +
+ " \001(\01322.org.apache.hadoop.hive.metastore." +
+ "hbase.Parameters\032\214\003\n\nSkewedInfo\022\030\n\020skewe" +
+ "d_col_names\030\001 \003(\t\022r\n\021skewed_col_values\030\002",
+ " \003(\0132W.org.apache.hadoop.hive.metastore." +
+ "hbase.StorageDescriptor.SkewedInfo.Skewe" +
+ "dColValueList\022\206\001\n\036skewed_col_value_locat" +
+ "ion_maps\030\003 \003(\0132^.org.apache.hadoop.hive." +
+ "metastore.hbase.StorageDescriptor.Skewed" +
+ "Info.SkewedColValueLocationMap\032.\n\022Skewed" +
+ "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" +
+ "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" +
+ "\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t" +
+ "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim",
+ "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" +
+ " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" +
+ ".hadoop.hive.metastore.hbase.Parameters\022" +
+ "\017\n\007sd_hash\030\007 \002(\014\022K\n\016partition_keys\030\010 \003(\013" +
+ "23.org.apache.hadoop.hive.metastore.hbas" +
+ "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." +
+ "apache.hadoop.hive.metastore.hbase.Param" +
+ "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" +
+ "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" +
+ "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo",
+ "p.hive.metastore.hbase.PrincipalPrivileg" +
+ "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\353\004\n\026Partition" +
+ "KeyComparator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 " +
+ "\002(\t\022S\n\002op\030\003 \003(\0132G.org.apache.hadoop.hive" +
+ ".metastore.hbase.PartitionKeyComparator." +
+ "Operator\022S\n\005range\030\004 \003(\0132D.org.apache.had" +
+ "oop.hive.metastore.hbase.PartitionKeyCom" +
+ "parator.Range\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\t" +
+ "inclusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R" +
+ "\n\005start\030\002 \001(\0132C.org.apache.hadoop.hive.m",
+ "etastore.hbase.PartitionKeyComparator.Ma" +
+ "rk\022P\n\003end\030\003 \001(\0132C.org.apache.hadoop.hive" +
+ ".metastore.hbase.PartitionKeyComparator." +
+ "Mark\032\241\001\n\010Operator\022Z\n\004type\030\001 \002(\0162L.org.ap" +
+ "ache.hadoop.hive.metastore.hbase.Partiti" +
+ "onKeyComparator.Operator.Type\022\013\n\003key\030\002 \002" +
+ "(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOT" +
+ "EQUALS\020\001*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004R" +
+ "OLE\020\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -35071,7 +34990,7 @@ public Builder removeRange(int index) {
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor,
- new java.lang.String[] { "ColumnName", "Order", "NullOrder", });
+ new java.lang.String[] { "ColumnName", "Order", });
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor =
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(1);
internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable = new
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 6534d68..2695ffa 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -3101,10 +3101,6 @@ void Order::__set_order(const int32_t val) {
this->order = val;
}
-void Order::__set_nullOrder(const int32_t val) {
- this->nullOrder = val;
-}
-
uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -3142,14 +3138,6 @@ uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
- case 3:
- if (ftype == ::apache::thrift::protocol::T_I32) {
- xfer += iprot->readI32(this->nullOrder);
- this->__isset.nullOrder = true;
- } else {
- xfer += iprot->skip(ftype);
- }
- break;
default:
xfer += iprot->skip(ftype);
break;
@@ -3175,10 +3163,6 @@ uint32_t Order::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeI32(this->order);
xfer += oprot->writeFieldEnd();
- xfer += oprot->writeFieldBegin("nullOrder", ::apache::thrift::protocol::T_I32, 3);
- xfer += oprot->writeI32(this->nullOrder);
- xfer += oprot->writeFieldEnd();
-
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -3188,20 +3172,17 @@ void swap(Order &a, Order &b) {
using ::std::swap;
swap(a.col, b.col);
swap(a.order, b.order);
- swap(a.nullOrder, b.nullOrder);
swap(a.__isset, b.__isset);
}
Order::Order(const Order& other139) {
col = other139.col;
order = other139.order;
- nullOrder = other139.nullOrder;
__isset = other139.__isset;
}
Order& Order::operator=(const Order& other140) {
col = other140.col;
order = other140.order;
- nullOrder = other140.nullOrder;
__isset = other140.__isset;
return *this;
}
@@ -3210,7 +3191,6 @@ void Order::printTo(std::ostream& out) const {
out << "Order(";
out << "col=" << to_string(col);
out << ", " << "order=" << to_string(order);
- out << ", " << "nullOrder=" << to_string(nullOrder);
out << ")";
}
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 3fd2543..97c07a5 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -1575,10 +1575,9 @@ inline std::ostream& operator<<(std::ostream& out, const SerDeInfo& obj)
}
typedef struct _Order__isset {
- _Order__isset() : col(false), order(false), nullOrder(false) {}
+ _Order__isset() : col(false), order(false) {}
bool col :1;
bool order :1;
- bool nullOrder :1;
} _Order__isset;
class Order {
@@ -1586,13 +1585,12 @@ class Order {
Order(const Order&);
Order& operator=(const Order&);
- Order() : col(), order(0), nullOrder(0) {
+ Order() : col(), order(0) {
}
virtual ~Order() throw();
std::string col;
int32_t order;
- int32_t nullOrder;
_Order__isset __isset;
@@ -1600,16 +1598,12 @@ class Order {
void __set_order(const int32_t val);
- void __set_nullOrder(const int32_t val);
-
bool operator == (const Order & rhs) const
{
if (!(col == rhs.col))
return false;
if (!(order == rhs.order))
return false;
- if (!(nullOrder == rhs.nullOrder))
- return false;
return true;
}
bool operator != (const Order &rhs) const {
diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
index fd05de5..cc0e2dd 100644
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
@@ -40,7 +40,6 @@
private static final org.apache.thrift.protocol.TField COL_FIELD_DESC = new org.apache.thrift.protocol.TField("col", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.I32, (short)2);
- private static final org.apache.thrift.protocol.TField NULL_ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("nullOrder", org.apache.thrift.protocol.TType.I32, (short)3);
  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -50,13 +49,11 @@
private String col; // required
private int order; // required
- private int nullOrder; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
COL((short)1, "col"),
- ORDER((short)2, "order"),
- NULL_ORDER((short)3, "nullOrder");
+ ORDER((short)2, "order");
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -75,8 +72,6 @@ public static _Fields findByThriftId(int fieldId) {
return COL;
case 2: // ORDER
return ORDER;
- case 3: // NULL_ORDER
- return NULL_ORDER;
default:
return null;
}
@@ -118,7 +113,6 @@ public String getFieldName() {
// isset id assignments
private static final int __ORDER_ISSET_ID = 0;
- private static final int __NULLORDER_ISSET_ID = 1;
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
@@ -127,8 +121,6 @@ public String getFieldName() {
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
- tmpMap.put(_Fields.NULL_ORDER, new org.apache.thrift.meta_data.FieldMetaData("nullOrder", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Order.class, metaDataMap);
}
@@ -138,15 +130,12 @@ public Order() {
public Order(
String col,
- int order,
- int nullOrder)
+ int order)
{
this();
this.col = col;
this.order = order;
setOrderIsSet(true);
- this.nullOrder = nullOrder;
- setNullOrderIsSet(true);
}
/**
@@ -158,7 +147,6 @@ public Order(Order other) {
this.col = other.col;
}
this.order = other.order;
- this.nullOrder = other.nullOrder;
}
public Order deepCopy() {
@@ -170,8 +158,6 @@ public void clear() {
this.col = null;
setOrderIsSet(false);
this.order = 0;
- setNullOrderIsSet(false);
- this.nullOrder = 0;
}
public String getCol() {
@@ -219,28 +205,6 @@ public void setOrderIsSet(boolean value) {
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ORDER_ISSET_ID, value);
}
- public int getNullOrder() {
- return this.nullOrder;
- }
-
- public void setNullOrder(int nullOrder) {
- this.nullOrder = nullOrder;
- setNullOrderIsSet(true);
- }
-
- public void unsetNullOrder() {
- __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NULLORDER_ISSET_ID);
- }
-
- /** Returns true if field nullOrder is set (has been assigned a value) and false otherwise */
- public boolean isSetNullOrder() {
- return EncodingUtils.testBit(__isset_bitfield, __NULLORDER_ISSET_ID);
- }
-
- public void setNullOrderIsSet(boolean value) {
- __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NULLORDER_ISSET_ID, value);
- }
-
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case COL:
@@ -259,14 +223,6 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
- case NULL_ORDER:
- if (value == null) {
- unsetNullOrder();
- } else {
- setNullOrder((Integer)value);
- }
- break;
-
}
}
@@ -278,9 +234,6 @@ public Object getFieldValue(_Fields field) {
case ORDER:
return getOrder();
- case NULL_ORDER:
- return getNullOrder();
-
}
throw new IllegalStateException();
}
@@ -296,8 +249,6 @@ public boolean isSet(_Fields field) {
return isSetCol();
case ORDER:
return isSetOrder();
- case NULL_ORDER:
- return isSetNullOrder();
}
throw new IllegalStateException();
}
@@ -333,15 +284,6 @@ public boolean equals(Order that) {
return false;
}
- boolean this_present_nullOrder = true;
- boolean that_present_nullOrder = true;
- if (this_present_nullOrder || that_present_nullOrder) {
- if (!(this_present_nullOrder && that_present_nullOrder))
- return false;
- if (this.nullOrder != that.nullOrder)
- return false;
- }
-
return true;
}
@@ -359,11 +301,6 @@ public int hashCode() {
if (present_order)
list.add(order);
- boolean present_nullOrder = true;
- list.add(present_nullOrder);
- if (present_nullOrder)
- list.add(nullOrder);
-
return list.hashCode();
}
@@ -395,16 +332,6 @@ public int compareTo(Order other) {
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetNullOrder()).compareTo(other.isSetNullOrder());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetNullOrder()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nullOrder, other.nullOrder);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
return 0;
}
@@ -436,10 +363,6 @@ public String toString() {
sb.append("order:");
sb.append(this.order);
first = false;
- if (!first) sb.append(", ");
- sb.append("nullOrder:");
- sb.append(this.nullOrder);
- first = false;
sb.append(")");
return sb.toString();
}
@@ -501,14 +424,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Order struct) throw
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
- case 3: // NULL_ORDER
- if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
- struct.nullOrder = iprot.readI32();
- struct.setNullOrderIsSet(true);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
- }
- break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -530,9 +445,6 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Order struct) thro
oprot.writeFieldBegin(ORDER_FIELD_DESC);
oprot.writeI32(struct.order);
oprot.writeFieldEnd();
- oprot.writeFieldBegin(NULL_ORDER_FIELD_DESC);
- oprot.writeI32(struct.nullOrder);
- oprot.writeFieldEnd();
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -557,25 +469,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Order struct) throw
if (struct.isSetOrder()) {
optionals.set(1);
}
- if (struct.isSetNullOrder()) {
- optionals.set(2);
- }
- oprot.writeBitSet(optionals, 3);
+ oprot.writeBitSet(optionals, 2);
if (struct.isSetCol()) {
oprot.writeString(struct.col);
}
if (struct.isSetOrder()) {
oprot.writeI32(struct.order);
}
- if (struct.isSetNullOrder()) {
- oprot.writeI32(struct.nullOrder);
- }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Order struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(3);
+ BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
struct.col = iprot.readString();
struct.setColIsSet(true);
@@ -584,10 +490,6 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Order struct) throws
struct.order = iprot.readI32();
struct.setOrderIsSet(true);
}
- if (incoming.get(2)) {
- struct.nullOrder = iprot.readI32();
- struct.setNullOrderIsSet(true);
- }
}
}
diff --git metastore/src/gen/thrift/gen-php/metastore/Types.php metastore/src/gen/thrift/gen-php/metastore/Types.php
index 4da4707..488a920 100644
--- metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -3076,10 +3076,6 @@ class Order {
* @var int
*/
public $order = null;
- /**
- * @var int
- */
- public $nullOrder = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -3092,10 +3088,6 @@ class Order {
'var' => 'order',
'type' => TType::I32,
),
- 3 => array(
- 'var' => 'nullOrder',
- 'type' => TType::I32,
- ),
);
}
if (is_array($vals)) {
@@ -3105,9 +3097,6 @@ class Order {
if (isset($vals['order'])) {
$this->order = $vals['order'];
}
- if (isset($vals['nullOrder'])) {
- $this->nullOrder = $vals['nullOrder'];
- }
}
}
@@ -3144,13 +3133,6 @@ class Order {
$xfer += $input->skip($ftype);
}
break;
- case 3:
- if ($ftype == TType::I32) {
- $xfer += $input->readI32($this->nullOrder);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
default:
$xfer += $input->skip($ftype);
break;
@@ -3174,11 +3156,6 @@ class Order {
$xfer += $output->writeI32($this->order);
$xfer += $output->writeFieldEnd();
}
- if ($this->nullOrder !== null) {
- $xfer += $output->writeFieldBegin('nullOrder', TType::I32, 3);
- $xfer += $output->writeI32($this->nullOrder);
- $xfer += $output->writeFieldEnd();
- }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 4b20da9..10eaf4a 100644
--- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -2237,20 +2237,17 @@ class Order:
Attributes:
- col
- order
- - nullOrder
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'col', None, None, ), # 1
(2, TType.I32, 'order', None, None, ), # 2
- (3, TType.I32, 'nullOrder', None, None, ), # 3
)
- def __init__(self, col=None, order=None, nullOrder=None,):
+ def __init__(self, col=None, order=None,):
self.col = col
self.order = order
- self.nullOrder = nullOrder
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -2271,11 +2268,6 @@ def read(self, iprot):
self.order = iprot.readI32()
else:
iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.I32:
- self.nullOrder = iprot.readI32()
- else:
- iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -2294,10 +2286,6 @@ def write(self, oprot):
oprot.writeFieldBegin('order', TType.I32, 2)
oprot.writeI32(self.order)
oprot.writeFieldEnd()
- if self.nullOrder is not None:
- oprot.writeFieldBegin('nullOrder', TType.I32, 3)
- oprot.writeI32(self.nullOrder)
- oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -2309,7 +2297,6 @@ def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.col)
value = (value * 31) ^ hash(self.order)
- value = (value * 31) ^ hash(self.nullOrder)
return value
def __repr__(self):
diff --git metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 556c380..1cf40ae 100644
--- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -557,12 +557,10 @@ class Order
include ::Thrift::Struct, ::Thrift::Struct_Union
COL = 1
ORDER = 2
- NULLORDER = 3
FIELDS = {
COL => {:type => ::Thrift::Types::STRING, :name => 'col'},
- ORDER => {:type => ::Thrift::Types::I32, :name => 'order'},
- NULLORDER => {:type => ::Thrift::Types::I32, :name => 'nullOrder'}
+ ORDER => {:type => ::Thrift::Types::I32, :name => 'order'}
}
def struct_fields; FIELDS; end
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 3b31ee1..26fbd20 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -21,7 +21,6 @@
import static org.apache.commons.lang.StringUtils.join;
import static org.apache.commons.lang.StringUtils.repeat;
-import com.google.common.collect.Lists;
import java.sql.Connection;
import java.sql.SQLException;
import java.text.ParseException;
@@ -32,10 +31,12 @@
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import javax.jdo.Transaction;
import javax.jdo.datastore.JDOConnection;
+
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -72,6 +73,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.collect.Lists;
+
/**
* This class contains the optimizations for MetaStore that rely on direct SQL access to
* the underlying database. It should use ANSI SQL and be compatible with common databases
@@ -672,7 +675,7 @@ public void apply(StorageDescriptor t, Object[] fields) {
t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
}
- queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\", \"SORT_COLS\".\"NULL_ORDER\""
+ queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\""
+ " from \"SORT_COLS\""
+ " where \"SD_ID\" in (" + sdIds + ") and \"INTEGER_IDX\" >= 0"
+ " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
@@ -680,8 +683,7 @@ public void apply(StorageDescriptor t, Object[] fields) {
@Override
public void apply(StorageDescriptor t, Object[] fields) {
if (fields[2] == null) return;
- assert fields[3] != null;
- t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]), extractSqlInt(fields[3])));
+ t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2])));
}});
queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from \"BUCKETING_COLS\""
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 1d04ef2..70cb618 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -37,11 +37,8 @@
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
@@ -57,10 +54,6 @@
import javax.jdo.datastore.DataStoreCache;
import javax.jdo.identity.IntIdentity;
-import com.google.common.annotations.VisibleForTesting;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -145,19 +138,22 @@
import org.apache.hadoop.hive.metastore.model.MVersionTable;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.util.StringUtils;
import org.apache.hive.common.util.HiveStringUtils;
import org.apache.thrift.TException;
+import org.datanucleus.AbstractNucleusContext;
import org.datanucleus.ClassLoaderResolver;
import org.datanucleus.NucleusContext;
-import org.datanucleus.AbstractNucleusContext;
import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
import org.datanucleus.store.rdbms.exceptions.MissingTableException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
/**
@@ -1367,8 +1363,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException,
if (keys != null) {
      mkeys = new ArrayList<MOrder>(keys.size());
for (Order part : keys) {
- mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder(),
- part.getNullOrder()));
+ mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder()));
}
}
return mkeys;
@@ -1379,7 +1374,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException,
if (mkeys != null) {
      keys = new ArrayList<Order>(mkeys.size());
for (MOrder part : mkeys) {
- keys.add(new Order(part.getCol(), part.getOrder(), part.getNullOrder()));
+ keys.add(new Order(part.getCol(), part.getOrder()));
}
}
return keys;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index a16997b..9ec7cd5 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -18,12 +18,22 @@
*/
package org.apache.hadoop.hive.metastore.hbase;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -68,21 +78,12 @@
import org.apache.hadoop.io.BytesWritable;
import org.apache.hive.common.util.BloomFilter;
import org.apache.hive.common.util.HiveStringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.security.MessageDigest;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
/**
* Utility functions
@@ -707,7 +708,7 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized)
sd.setBucketCols(new ArrayList<>(proto.getBucketColsList()));
    List<Order> sortCols = new ArrayList<>();
for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) {
- sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder(), protoOrder.getNullOrder()));
+ sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder()));
}
sd.setSortCols(sortCols);
if (proto.hasSkewedInfo()) {
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
index 5370c02..732c278 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
@@ -21,16 +21,14 @@
public class MOrder {
private String col;
private int order;
- private int nullOrder;
/**
* @param col
* @param order
*/
- public MOrder(String col, int order, int nullOrder) {
+ public MOrder(String col, int order) {
this.col = col;
this.order = order;
- this.nullOrder = nullOrder;
}
/**
@@ -61,18 +59,4 @@ public void setOrder(int order) {
this.order = order;
}
- /**
- * @return the null order
- */
- public int getNullOrder() {
- return nullOrder;
- }
-
- /**
- * @param nullOrder the null order to set
- */
- public void setNullOrder(int nullOrder) {
- this.nullOrder = nullOrder;
- }
-
}
diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
index 552097b..466fdf9 100644
--- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
+++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
@@ -205,7 +205,6 @@ message StorageDescriptor {
message Order {
required string column_name = 1;
optional sint32 order = 2 [default = 1];
- optional sint32 nullOrder = 3 [default = 0];
}
message SerDeInfo {
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
index d938a03..e4723f6 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
@@ -402,7 +402,7 @@ public void createTable() throws Exception {
    Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
- serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+ serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
emptyParameters, null, null, null);
store.createTable(table);
@@ -424,7 +424,6 @@ public void createTable() throws Exception {
Assert.assertEquals(1, t.getSd().getSortColsSize());
Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol());
Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder());
- Assert.assertEquals(0, t.getSd().getSortCols().get(0).getNullOrder());
Assert.assertEquals(1, t.getSd().getParametersSize());
Assert.assertEquals("value", t.getSd().getParameters().get("key"));
Assert.assertEquals("me", t.getOwner());
@@ -1274,7 +1273,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw
    Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
- serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+ serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
int currentTime = (int)(System.currentTimeMillis() / 1000);
Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
emptyParameters, null, null, null);
@@ -1292,7 +1291,7 @@ private Table createMockTable(String type) throws Exception {
    Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
- serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+ serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
int currentTime = (int)(System.currentTimeMillis() / 1000);
Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
emptyParameters, null, null, null);
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
index 570d023..b1dc542 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
@@ -583,7 +583,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw
    Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
- serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+ serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
int currentTime = (int)(System.currentTimeMillis() / 1000);
Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
emptyParameters, null, null, null);
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
index 8e856a1..bea0b34 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
@@ -79,22 +79,19 @@ public void changeOnUnset() {
@Test
public void changeOrder() {
StorageDescriptor sd = new StorageDescriptor();
- sd.addToSortCols(new Order("fred", 1, 0));
+ sd.addToSortCols(new Order("fred", 1));
SharedStorageDescriptor ssd = new SharedStorageDescriptor();
ssd.setShared(sd);
ssd.getSortCols().get(0).setOrder(2);
- ssd.getSortCols().get(0).setNullOrder(3);
Assert.assertFalse(sd.getSortCols() == ssd.getSortCols());
Assert.assertEquals(2, ssd.getSortCols().get(0).getOrder());
Assert.assertEquals(1, sd.getSortCols().get(0).getOrder());
- Assert.assertEquals(3, ssd.getSortCols().get(0).getNullOrder());
- Assert.assertEquals(0, sd.getSortCols().get(0).getNullOrder());
}
@Test
public void unsetOrder() {
StorageDescriptor sd = new StorageDescriptor();
- sd.addToSortCols(new Order("fred", 1, 0));
+ sd.addToSortCols(new Order("fred", 1));
SharedStorageDescriptor ssd = new SharedStorageDescriptor();
ssd.setShared(sd);
ssd.unsetSortCols();
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 70afe16..c51cfd6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2140,11 +2140,6 @@ private int showCreateTable(Hive db, DataOutputStream outStream, String tableNam
else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
sortKeyDesc = sortKeyDesc + "DESC";
}
- if (sortCol.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST) {
- sortKeyDesc = sortKeyDesc + " NULLS FIRST";
- } else if (sortCol.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_LAST) {
- sortKeyDesc = sortKeyDesc + " NULLS LAST";
- }
sortKeys.add(sortKeyDesc);
}
tbl_sort_bucket += StringUtils.join(sortKeys, ", \n");
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 74cb2e0..4a546d1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.exec;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
@@ -593,9 +592,7 @@ private void updatePartitionBucketSortColumns(Hive db, Table table, Partition pa
newSortCols.add(new Order(
partn.getCols().get(sortCol.getIndexes().get(0)).getName(),
sortCol.getSortOrder() == '+' ? BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC :
- BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC,
- sortCol.getNullSortOrder() == 'a' ? BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST :
- BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_LAST));
+ BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC));
} else {
// If the table is sorted on a partition column, not valid for sorting
updateSortCols = false;
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index ad17096..fdc7956 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -946,7 +946,7 @@ public void createIndex(String tableName, String indexName, String indexHandlerC
FieldSchema col = cols.get(i);
if (indexedCols.contains(col.getName())) {
indexTblCols.add(col);
- sortCols.add(new Order(col.getName(), 1, 0));
+ sortCols.add(new Order(col.getName(), 1));
k++;
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
index 677649d..b57dc77 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
@@ -357,8 +357,7 @@ private boolean checkSortColsAndJoinCols(List sortCols,
Order o = sortCols.get(pos);
if (pos < sortColumnsFirstPartition.size()) {
- if (o.getOrder() != sortColumnsFirstPartition.get(pos).getOrder() ||
- o.getNullOrder() != sortColumnsFirstPartition.get(pos).getNullOrder()) {
+ if (o.getOrder() != sortColumnsFirstPartition.get(pos).getOrder()) {
return false;
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index 3d580d8..da261bb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -26,7 +26,6 @@
import java.util.Stack;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -50,7 +49,6 @@
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -170,35 +168,16 @@ public BucketSortReduceSinkProcessor(ParseContext pGraphContext) {
        List<FieldSchema> tabCols) {
      List<Integer> sortOrders = new ArrayList<Integer>();
for (Order sortCol : tabSortCols) {
- int pos = 0;
for (FieldSchema tabCol : tabCols) {
if (sortCol.getCol().equals(tabCol.getName())) {
sortOrders.add(sortCol.getOrder());
break;
}
- pos++;
}
}
return sortOrders;
}
-  private List<Integer> getNullSortOrder(
-      List<Order> tabSortCols,
-      List<FieldSchema> tabCols) {
-    List<Integer> nullSortOrders = new ArrayList<Integer>();
- for (Order sortCol : tabSortCols) {
- int pos = 0;
- for (FieldSchema tabCol : tabCols) {
- if (sortCol.getCol().equals(tabCol.getName())) {
- nullSortOrders.add(sortCol.getNullOrder());
- break;
- }
- pos++;
- }
- }
- return nullSortOrders;
- }
-
// Return true if the partition is bucketed/sorted by the specified positions
// The number of buckets, the sort order should also match along with the
// columns which are bucketed/sorted
@@ -206,7 +185,6 @@ private boolean checkPartition(Partition partition,
      List<Integer> bucketPositionsDest,
      List<Integer> sortPositionsDest,
      List<Integer> sortOrderDest,
-      List<Integer> sortNullOrderDest,
int numBucketsDest) {
// The bucketing and sorting positions should exactly match
int numBuckets = partition.getBucketCount();
@@ -220,12 +198,9 @@ private boolean checkPartition(Partition partition,
getSortPositions(partition.getSortCols(), partition.getTable().getCols());
    List<Integer> sortOrder =
getSortOrder(partition.getSortCols(), partition.getTable().getCols());
-    List<Integer> sortNullOrder =
- getNullSortOrder(partition.getSortCols(), partition.getTable().getCols());
return bucketPositionsDest.equals(partnBucketPositions) &&
sortPositionsDest.equals(sortPositions) &&
- sortOrderDest.equals(sortOrder) &&
- sortNullOrderDest.equals(sortNullOrder);
+ sortOrderDest.equals(sortOrder);
}
// Return true if the table is bucketed/sorted by the specified positions
@@ -235,7 +210,6 @@ private boolean checkTable(Table table,
      List<Integer> bucketPositionsDest,
      List<Integer> sortPositionsDest,
      List<Integer> sortOrderDest,
-      List<Integer> sortNullOrderDest,
int numBucketsDest) {
// The bucketing and sorting positions should exactly match
int numBuckets = table.getNumBuckets();
@@ -249,12 +223,9 @@ private boolean checkTable(Table table,
getSortPositions(table.getSortCols(), table.getCols());
    List<Integer> sortOrder =
getSortOrder(table.getSortCols(), table.getCols());
-    List<Integer> sortNullOrder =
- getNullSortOrder(table.getSortCols(), table.getCols());
return bucketPositionsDest.equals(tableBucketPositions) &&
sortPositionsDest.equals(sortPositions) &&
- sortOrderDest.equals(sortOrder) &&
- sortNullOrderDest.equals(sortNullOrder);
+ sortOrderDest.equals(sortOrder);
}
// Store the bucket path to bucket number mapping in the table scan operator.
@@ -332,8 +303,7 @@ private int findColumnPosition(List<FieldSchema> cols, String colName) {
private boolean validateSMBJoinKeys(SMBJoinDesc smbJoinDesc,
List<ExprNodeColumnDesc> sourceTableBucketCols,
List<ExprNodeColumnDesc> sourceTableSortCols,
- List<Integer> sortOrder,
- List<Integer> sortNullOrder) {
+ List<Integer> sortOrder) {
// The sort-merge join creates the output sorted and bucketized by the same columns.
// This can be relaxed in the future if there is a requirement.
if (!sourceTableBucketCols.equals(sourceTableSortCols)) {
@@ -475,8 +445,6 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
getSortPositions(destTable.getSortCols(), destTable.getCols());
List<Integer> sortOrder =
getSortOrder(destTable.getSortCols(), destTable.getCols());
- List<Integer> sortNullOrder =
- getNullSortOrder(destTable.getSortCols(), destTable.getCols());
boolean useBucketSortPositions = true;
// Only selects and filters are allowed
@@ -511,7 +479,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
if (!validateSMBJoinKeys(smbJoinDesc, sourceTableBucketCols,
- sourceTableSortCols, sortOrder, sortNullOrder)) {
+ sourceTableSortCols, sortOrder)) {
return null;
}
@@ -586,7 +554,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
for (Partition partition : partitions) {
if (!checkPartition(partition, newBucketPositions, newSortPositions, sortOrder,
- sortNullOrder, numBucketsDestination)) {
+ numBucketsDestination)) {
return null;
}
}
@@ -597,7 +565,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
else {
if (!checkTable(srcTable, newBucketPositions, newSortPositions, sortOrder,
- sortNullOrder, numBucketsDestination)) {
+ numBucketsDestination)) {
return null;
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index 3e6c7c7..adfbb67 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -26,7 +26,6 @@
import java.util.Set;
import java.util.Stack;
-import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -50,7 +49,6 @@
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
@@ -185,7 +183,6 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
destTable.getCols());
List<Integer> sortPositions = null;
List<Integer> sortOrder = null;
- List<Integer> sortNullOrder = null;
if (fsOp.getConf().getWriteType() == AcidUtils.Operation.UPDATE ||
fsOp.getConf().getWriteType() == AcidUtils.Operation.DELETE) {
// When doing updates and deletes we always want to sort on the rowid because the ACID
@@ -193,11 +190,13 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
// ignore whatever comes from the table and enforce this sort order instead.
sortPositions = Arrays.asList(0);
sortOrder = Arrays.asList(1); // 1 means asc, could really use enum here in the thrift if
- sortNullOrder = Arrays.asList(0);
} else {
sortPositions = getSortPositions(destTable.getSortCols(), destTable.getCols());
sortOrder = getSortOrders(destTable.getSortCols(), destTable.getCols());
- sortNullOrder = getSortNullOrders(destTable.getSortCols(), destTable.getCols());
+ }
+ List<Integer> sortNullOrder = new ArrayList<Integer>();
+ for (int order : sortOrder) {
+ sortNullOrder.add(order == 1 ? 0 : 1); // for asc, nulls first; for desc, nulls last
}
LOG.debug("Got sort order");
for (int i : sortPositions) LOG.debug("sort position " + i);
@@ -597,26 +596,6 @@ public ReduceSinkOperator getReduceSinkOp(List<Integer> partitionPositions,
return sortOrders;
}
- /**
- * Get the null sort order for the sort columns
- * @param tabSortCols
- * @param tabCols
- * @return
- */
- private List<Integer> getSortNullOrders(List<Order> tabSortCols,
- List<FieldSchema> tabCols) {
- List<Integer> sortNullOrders = Lists.newArrayList();
- for (Order sortCol : tabSortCols) {
- for (FieldSchema tabCol : tabCols) {
- if (sortCol.getCol().equals(tabCol.getName())) {
- sortNullOrders.add(sortCol.getNullOrder());
- break;
- }
- }
- }
- return sortNullOrders;
- }
-
private ArrayList<ExprNodeDesc> getPositionsToExprNodes(List<Integer> pos,
List<ColumnInfo> colInfos) {
ArrayList<ExprNodeDesc> cols = Lists.newArrayList();
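
Note on the SortedDynPartitionOptimizer hunks above: instead of reading a stored null order from the metastore, the null order is now derived from the asc/desc order, using the conventions visible in the hunk (1 = ascending, 0 = descending; null order 0 = NULLS FIRST, 1 = NULLS LAST). A self-contained sketch of that derivation:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class NullOrderSketch {
      static List<Integer> deriveNullOrder(List<Integer> sortOrder) {
        List<Integer> nullOrder = new ArrayList<Integer>();
        for (int order : sortOrder) {
          nullOrder.add(order == 1 ? 0 : 1); // asc -> nulls first, desc -> nulls last
        }
        return nullOrder;
      }

      public static void main(String[] args) {
        // e.g. [asc, desc] -> [nulls first, nulls last]
        System.out.println(deriveNullOrder(Arrays.asList(1, 0))); // prints [0, 1]
      }
    }
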
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index a95da0a..73ca9bf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -40,8 +40,6 @@
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.util.ImmutableBitSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -59,6 +57,8 @@
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.Statistics;
import org.apache.hadoop.hive.ql.stats.StatsUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
@@ -160,17 +160,13 @@ public RelNode toRel(ToRelContext context) {
FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
if (field.getName().equals(sortColumn.getCol())) {
Direction direction;
+ NullDirection nullDirection;
if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
direction = Direction.ASCENDING;
- }
- else {
- direction = Direction.DESCENDING;
- }
- NullDirection nullDirection;
- if (sortColumn.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST) {
nullDirection = NullDirection.FIRST;
}
else {
+ direction = Direction.DESCENDING;
nullDirection = NullDirection.LAST;
}
collationList.add(new RelFieldCollation(i,direction,nullDirection));
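
Note on the RelOptHiveTable hunk above: the Calcite collation's null direction is no longer read from the sort column but implied by its direction, ASC pairing with NULLS FIRST and DESC with NULLS LAST. A self-contained sketch of the mapping, using stand-in enums rather than Calcite's RelFieldCollation types:

    class CollationSketch {
      enum Direction { ASCENDING, DESCENDING }
      enum NullDirection { FIRST, LAST }

      // After the revert the null direction follows directly from the sort direction.
      static NullDirection nullDirectionFor(Direction d) {
        return d == Direction.ASCENDING ? NullDirection.FIRST : NullDirection.LAST;
      }

      public static void main(String[] args) {
        System.out.println(nullDirectionFor(Direction.DESCENDING)); // LAST
      }
    }
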
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
index ea3e179..296fecb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
@@ -201,16 +201,14 @@ public SortCol() {
private List<Integer> indexes = new ArrayList<Integer>();
// Sort order (+|-)
private char sortOrder;
- private char nullSortOrder;
- public SortCol(String name, int index, char sortOrder, char nullSortOrder) {
- this(sortOrder, nullSortOrder);
+ public SortCol(String name, int index, char sortOrder) {
+ this(sortOrder);
addAlias(name, index);
}
- public SortCol(char sortOrder, char nullSortOrder) {
+ public SortCol(char sortOrder) {
this.sortOrder = sortOrder;
- this.nullSortOrder = nullSortOrder;
}
@@ -234,16 +232,11 @@ public char getSortOrder() {
return sortOrder;
}
- public char getNullSortOrder() {
- return nullSortOrder;
- }
-
@Override
// Chooses a representative alias, index, and order to use as the String, the first is used
// because it is set in the constructor
public String toString() {
- return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder
- + " nullOrder: " + nullSortOrder;
+ return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder;
}
}
}
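
Note on the BucketingSortingCtx hunk above: after the change a SortCol carries only the column aliases, their positions, and a single '+'/'-' sort-order character. A minimal stand-in sketch (not the Hive inner class itself):

    import java.util.ArrayList;
    import java.util.List;

    class SortColSketch {
      private final List<String> names = new ArrayList<String>();
      private final List<Integer> indexes = new ArrayList<Integer>();
      private final char sortOrder; // '+' ascending, '-' descending; no null-order character

      SortColSketch(String name, int index, char sortOrder) {
        this.sortOrder = sortOrder;
        names.add(name);
        indexes.add(index);
      }

      @Override
      public String toString() {
        return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder;
      }

      public static void main(String[] args) {
        System.out.println(new SortColSketch("key", 0, '+')); // name: key index: 0 order: +
      }
    }
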
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
index 9159120..391cfda 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
@@ -166,7 +166,6 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
String sortOrder = rsDesc.getOrder();
- String nullSortOrder = rsDesc.getNullOrder();
List<ExprNodeDesc> keyCols = rsDesc.getKeyCols();
List<ExprNodeDesc> valCols = ExprNodeDescUtils.backtrack(joinValues, jop, parent);
@@ -187,8 +186,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
newSortCols[keyIndex].addAlias(vname, vindex);
} else {
newBucketCols[keyIndex] = new BucketCol(vname, vindex);
- newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex),
- nullSortOrder.charAt(keyIndex));
+ newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex));
}
}
}
@@ -313,8 +311,7 @@ private static void findBucketingSortingColumns(List<ExprNodeDesc> exprs,
int sortIndex = indexOfColName(sortCols, columnExpr.getColumn());
if (sortIndex != -1) {
if (newSortCols[sortIndex] == null) {
- newSortCols[sortIndex] = new SortCol(sortCols.get(sortIndex).getSortOrder(),
- sortCols.get(sortIndex).getNullSortOrder());
+ newSortCols[sortIndex] = new SortCol(sortCols.get(sortIndex).getSortOrder());
}
newSortCols[sortIndex].addAlias(
colInfos.get(colInfosIndex).getInternalName(), colInfosIndex);
@@ -439,7 +436,7 @@ private static int indexOfColName(List<? extends BucketSortCol> bucketSortCols,
private static List<SortCol> getNewSortCols(List<SortCol> sortCols, List<ColumnInfo> colInfos) {
List<SortCol> newSortCols = new ArrayList<SortCol>(sortCols.size());
for (int i = 0; i < sortCols.size(); i++) {
- SortCol sortCol = new SortCol(sortCols.get(i).getSortOrder(), sortCols.get(i).getNullSortOrder());
+ SortCol sortCol = new SortCol(sortCols.get(i).getSortOrder());
for (Integer index : sortCols.get(i).getIndexes()) {
// The only time this condition should be false is in the case of dynamic partitioning
if (index < colInfos.size()) {
@@ -540,7 +537,6 @@ static void extractTraits(BucketingSortingCtx bctx, ReduceSinkOperator rop, Oper
static List<SortCol> extractSortCols(ReduceSinkOperator rop, List<ColumnInfo> outputValues) {
String sortOrder = rop.getConf().getOrder();
- String nullSortOrder = rop.getConf().getNullOrder();
List<SortCol> sortCols = new ArrayList<SortCol>();
ArrayList<ExprNodeDesc> keyCols = rop.getConf().getKeyCols();
for (int i = 0; i < keyCols.size(); i++) {
@@ -553,7 +549,7 @@ static void extractTraits(BucketingSortingCtx bctx, ReduceSinkOperator rop, Oper
break;
}
sortCols.add(new SortCol(((ExprNodeColumnDesc) keyCol).getColumn(), index,
- sortOrder.charAt(i), nullSortOrder.charAt(i)));
+ sortOrder.charAt(i)));
}
// If the sorted columns can't all be found in the values then the data is only sorted on
// the columns seen up until now
@@ -654,7 +650,6 @@ protected void processGroupByReduceSink(ReduceSinkOperator rop, GroupByOperator
GroupByDesc groupByDesc = gop.getConf();
String sortOrder = rop.getConf().getOrder();
- String nullSortOrder = rop.getConf().getNullOrder();
List<BucketCol> bucketCols = new ArrayList<BucketCol>();
List<SortCol> sortCols = new ArrayList<SortCol>();
assert rop.getConf().getKeyCols().size() <= rop.getSchema().getSignature().size();
@@ -665,7 +660,7 @@ protected void processGroupByReduceSink(ReduceSinkOperator rop, GroupByOperator
}
String colName = rop.getSchema().getSignature().get(i).getInternalName();
bucketCols.add(new BucketCol(colName, i));
- sortCols.add(new SortCol(colName, i, sortOrder.charAt(i), nullSortOrder.charAt(i)));
+ sortCols.add(new SortCol(colName, i, sortOrder.charAt(i)));
}
bctx.setBucketedCols(rop, bucketCols);
bctx.setSortedCols(rop, sortCols);
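
Note on the BucketingSortingOpProcFactory hunks above: sort columns are now rebuilt from the ReduceSink's order string alone (one '+'/'-' character per key column); the parallel null-order string is gone. An illustrative, simplified sketch of that pairing, with plain strings standing in for Hive's SortCol:

    import java.util.ArrayList;
    import java.util.List;

    class ExtractSortColsSketch {
      // Pair each key column with the character at the same position in the order string.
      static List<String> extract(List<String> keyColNames, String order) {
        List<String> sortCols = new ArrayList<String>();
        for (int i = 0; i < keyColNames.size(); i++) {
          sortCols.add(keyColNames.get(i) + order.charAt(i)); // e.g. "key+" or "value-"
        }
        return sortCols;
      }

      public static void main(String[] args) {
        List<String> keys = new ArrayList<String>();
        keys.add("key");
        keys.add("value");
        System.out.println(extract(keys, "+-")); // [key+, value-]
      }
    }
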
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index f10a40a..b36a9a0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -653,7 +653,7 @@ private static String spliceString(String str, int i, int length, String replace
return colList;
}
- protected List<Order> getColumnNamesOrder(ASTNode ast) {
+ protected List<Order> getColumnNamesOrder(ASTNode ast) throws SemanticException {
List<Order> colList = new ArrayList<Order>();
int numCh = ast.getChildCount();
for (int i = 0; i < numCh; i++) {
@@ -662,19 +662,19 @@ private static String spliceString(String str, int i, int length, String replace
child = (ASTNode) child.getChild(0);
if (child.getToken().getType() == HiveParser.TOK_NULLS_FIRST) {
colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
- HIVE_COLUMN_ORDER_ASC, HIVE_COLUMN_NULLS_FIRST));
+ HIVE_COLUMN_ORDER_ASC));
} else {
- colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
- HIVE_COLUMN_ORDER_ASC, HIVE_COLUMN_NULLS_LAST));
+ throw new SemanticException("create/alter table: "
+ + "not supported NULLS LAST for ORDER BY in ASC order");
}
} else {
child = (ASTNode) child.getChild(0);
if (child.getToken().getType() == HiveParser.TOK_NULLS_LAST) {
colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
- HIVE_COLUMN_ORDER_DESC, HIVE_COLUMN_NULLS_LAST));
+ HIVE_COLUMN_ORDER_DESC));
} else {
- colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
- HIVE_COLUMN_ORDER_DESC, HIVE_COLUMN_NULLS_FIRST));
+ throw new SemanticException("create/alter table: "
+ + "not supported NULLS FIRST for ORDER BY in DESC order");
}
}
}
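
Note on the BaseSemanticAnalyzer hunk above: with the NULL_ORDER column removed from the metastore, a table-level sort column can only encode ASC (implying NULLS FIRST) or DESC (implying NULLS LAST), so the other two combinations are rejected with a SemanticException at analysis time. A simplified, self-contained sketch of that rule, with a plain IllegalArgumentException standing in for SemanticException:

    class SortSpecCheckSketch {
      // Returns the order value still stored in the metastore Order struct: 1 = asc, 0 = desc.
      static int checkSortSpec(boolean ascending, boolean nullsFirst) {
        if (ascending && !nullsFirst) {
          throw new IllegalArgumentException("create/alter table: NULLS LAST not supported with ASC");
        }
        if (!ascending && nullsFirst) {
          throw new IllegalArgumentException("create/alter table: NULLS FIRST not supported with DESC");
        }
        return ascending ? 1 : 0;
      }

      public static void main(String[] args) {
        System.out.println(checkSortSpec(true, true));   // 1: ASC NULLS FIRST is representable
        System.out.println(checkSortSpec(false, false)); // 0: DESC NULLS LAST is representable
      }
    }
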
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 3e91e10..9ab091d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -117,7 +117,6 @@
import org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.ColumnPruner;
import org.apache.hadoop.hive.ql.optimizer.Optimizer;
import org.apache.hadoop.hive.ql.optimizer.Transform;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
@@ -6358,7 +6357,6 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb,
(dest_tab.getSortCols().size() > 0)) {
sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
sortOrders = getSortOrders(dest, qb, dest_tab, input);
- nullSortOrders = getNullSortOrders(dest, qb, dest_tab, input);
if (!enforceBucketing && !dest_tab.isIndexTable()) {
throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
} else {
@@ -6393,12 +6391,10 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb,
}
StringBuilder order = new StringBuilder();
+ StringBuilder nullOrder = new StringBuilder();
for (int sortOrder : sortOrders) {
order.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? '+' : '-');
- }
- StringBuilder nullOrder = new StringBuilder();
- for (int pos : nullSortOrders) {
- nullOrder.append(pos == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST ? 'a' : 'z');
+ nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z');
}
input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(),
maxReducers, (AcidUtils.isAcidTable(dest_tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID));
@@ -7362,23 +7358,6 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input,
return orders;
}
- private ArrayList<Integer> getNullSortOrders(String dest, QB qb, Table tab, Operator input)
- throws SemanticException {
- List<Order> tabSortCols = tab.getSortCols();
- List<FieldSchema> tabCols = tab.getCols();
-
- ArrayList<Integer> orders = new ArrayList<Integer>();
- for (Order sortCol : tabSortCols) {
- for (FieldSchema tabCol : tabCols) {
- if (sortCol.getCol().equals(tabCol.getName())) {
- orders.add(sortCol.getNullOrder());
- break;
- }
- }
- }
- return orders;
- }
-
private Operator genReduceSinkPlan(String dest, QB qb, Operator<?> input,
int numReducers, boolean hasOrderBy) throws SemanticException {
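
Note on the SemanticAnalyzer hunk above: the order and nullOrder strings handed to genReduceSinkPlan() are now built in lockstep from the table's asc/desc orders, '+' pairing with 'a' (nulls first) and '-' with 'z' (nulls last). A self-contained sketch, assuming 1 = ascending as elsewhere in the patch:

    import java.util.Arrays;
    import java.util.List;

    class OrderStringSketch {
      public static void main(String[] args) {
        List<Integer> sortOrders = Arrays.asList(1, 0); // 1 = asc, 0 = desc
        StringBuilder order = new StringBuilder();
        StringBuilder nullOrder = new StringBuilder();
        for (int sortOrder : sortOrders) {
          order.append(sortOrder == 1 ? '+' : '-');
          nullOrder.append(sortOrder == 1 ? 'a' : 'z'); // asc -> nulls first, desc -> nulls last
        }
        System.out.println(order + " / " + nullOrder); // prints "+- / az"
      }
    }
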
diff --git ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 7f6430f..cf7eb70 100644
--- ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -224,7 +224,7 @@ public void inputSplitNullBase() throws Exception {
@Test
public void sortedTable() throws Exception {
List<Order> sortCols = new ArrayList<Order>(1);
- sortCols.add(new Order("b", 1, 0));
+ sortCols.add(new Order("b", 1));
Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols, false);
@@ -249,7 +249,7 @@ public void sortedTable() throws Exception {
@Test
public void sortedPartition() throws Exception {
List<Order> sortCols = new ArrayList<Order>(1);
- sortCols.add(new Order("b", 1, 0));
+ sortCols.add(new Order("b", 1));
Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols, false);
Partition p = newPartition(t, "today", sortCols);
diff --git ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
index 9eea7f7..3b71598 100644
--- ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
+++ ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
@@ -232,7 +232,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 8
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -281,7 +281,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 8
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test changing the bucket columns
@@ -326,7 +326,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 8
Bucket Columns: [value]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -375,7 +375,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 8
Bucket Columns: [value]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test changing the number of buckets
@@ -420,7 +420,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [value]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -469,7 +469,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [value]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test changing the sort columns
@@ -514,7 +514,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:0, nullOrder:1)]
+Sort Columns: [Order(col:value, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -563,7 +563,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:0, nullOrder:1)]
+Sort Columns: [Order(col:value, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test changing the sort order
@@ -608,7 +608,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -657,7 +657,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test a sorted partition gets converted to unsorted
diff --git ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
index af2f47a..cab3de4 100644
--- ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
+++ ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
@@ -408,7 +408,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 12
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test changing sort order
@@ -455,7 +455,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 12
Bucket Columns: [key]
-Sort Columns: [Order(col:value, order:0, nullOrder:1)]
+Sort Columns: [Order(col:value, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test removing test order
diff --git ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
index 46d5b34..184d2e4 100644
--- ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
+++ ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
@@ -110,7 +110,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [b]
-Sort Columns: [Order(col:b, order:0, nullOrder:1)]
+Sort Columns: [Order(col:b, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Turn off clustering for a partition
@@ -200,7 +200,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [a, b]
-Sort Columns: [Order(col:a, order:0, nullOrder:1), Order(col:b, order:1, nullOrder:0)]
+Sort Columns: [Order(col:a, order:0), Order(col:b, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: drop table alter_table_partition_clusterby_sortby
diff --git ql/src/test/results/clientpositive/alter_table_not_sorted.q.out ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
index 8a88bc4..6e1ec59 100644
--- ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
+++ ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
@@ -34,7 +34,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [a]
-Sort Columns: [Order(col:a, order:1, nullOrder:0)]
+Sort Columns: [Order(col:a, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: alter table alter_table_not_sorted not sorted
diff --git ql/src/test/results/clientpositive/authorization_index.q.out ql/src/test/results/clientpositive/authorization_index.q.out
index a6ec998..adc02ad 100644
--- ql/src/test/results/clientpositive/authorization_index.q.out
+++ ql/src/test/results/clientpositive/authorization_index.q.out
@@ -41,7 +41,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
-Sort Columns: [Order(col:a, order:1, nullOrder:0)]
+Sort Columns: [Order(col:a, order:1)]
PREHOOK: query: alter index t1_index on t1 rebuild
PREHOOK: type: ALTERINDEX_REBUILD
PREHOOK: Input: default@t1
diff --git ql/src/test/results/clientpositive/bucket5.q.out ql/src/test/results/clientpositive/bucket5.q.out
index aa47ba8..fa926aa 100644
--- ql/src/test/results/clientpositive/bucket5.q.out
+++ ql/src/test/results/clientpositive/bucket5.q.out
@@ -535,7 +535,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 2
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: SELECT * FROM bucketed_table TABLESAMPLE (BUCKET 1 OUT OF 2) s LIMIT 10
diff --git ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
index c5d695e..d03bfe4 100644
--- ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
+++ ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
@@ -1155,7 +1155,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1196,7 +1196,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(*) from over1k_part_orc
@@ -2129,7 +2129,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2170,7 +2170,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain select * from over1k_part_buck_sort2_orc
@@ -2339,7 +2339,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2380,7 +2380,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain select * from over1k_part_buck_sort2_orc
diff --git ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 9a222b1..857d609 100644
--- ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -1060,7 +1060,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1101,7 +1101,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(*) from over1k_part
@@ -2027,7 +2027,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2068,7 +2068,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select * from over1k_part_buck_sort2
@@ -2170,7 +2170,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2211,7 +2211,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select * from over1k_part_buck_sort2
diff --git ql/src/test/results/clientpositive/index_skewtable.q.out ql/src/test/results/clientpositive/index_skewtable.q.out
index 45e6834..972789d 100644
--- ql/src/test/results/clientpositive/index_skewtable.q.out
+++ ql/src/test/results/clientpositive/index_skewtable.q.out
@@ -57,7 +57,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
PREHOOK: query: ALTER INDEX kv_index ON kv REBUILD
PREHOOK: type: ALTERINDEX_REBUILD
PREHOOK: Input: default@kv
diff --git ql/src/test/results/clientpositive/infer_bucket_sort.q.out ql/src/test/results/clientpositive/infer_bucket_sort.q.out
index 709ca10..182dd3a 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort.q.out
@@ -62,7 +62,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by where a key isn't selected, should not be bucketed or sorted
@@ -168,7 +168,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys
@@ -221,7 +221,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join with two keys and only one selected, should not be bucketed or sorted
@@ -327,7 +327,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key
@@ -380,7 +380,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test distribute by, should only be bucketed by key
@@ -486,7 +486,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test sort by desc, should be sorted by key
@@ -539,7 +539,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test cluster by, should be bucketed and sorted by key
@@ -592,7 +592,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other
@@ -645,7 +645,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join in simple subquery, should be bucketed and sorted on key
@@ -698,7 +698,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join in simple subquery renaming key column, should be bucketed and sorted on key
@@ -751,7 +751,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in simple subquery, should be bucketed and sorted on key
@@ -804,7 +804,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in simple subquery renaming key column, should be bucketed and sorted on key
@@ -857,7 +857,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery with where outside, should still be bucketed and sorted on key
@@ -910,7 +910,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery with expression on value, should still be bucketed and sorted on key
@@ -963,7 +963,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery with lateral view outside, should still be bucketed and sorted
@@ -1016,7 +1016,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
@@ -1071,7 +1071,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery with select on outside reordering the columns, should be bucketed and
@@ -1126,7 +1126,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key
@@ -1179,7 +1179,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery followed by sort by, should only be sorted by the sort key
@@ -1232,7 +1232,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery followed by transform script, should not be bucketed or sorted
@@ -1338,6 +1338,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
index 6db9428..33d795b 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
@@ -64,7 +64,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 3
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
index e5626a4..2f7e538 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
@@ -123,6 +123,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index 5f983c7..7e3b48f 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -302,7 +302,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12')
@@ -342,7 +342,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: CREATE TABLE srcpart_merge_dp LIKE srcpart
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
index 90a71ae..ebfce60 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
@@ -144,7 +144,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test rollup, should be bucketed and sorted on key, value, grouping_key
@@ -203,7 +203,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value, grouping_key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test cube, should not be bucketed or sorted because its missing the grouping ID
@@ -328,7 +328,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test cube, should be bucketed and sorted on key, value, grouping_key
@@ -387,7 +387,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value, grouping_key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test grouping sets, should not be bucketed or sorted because its missing the grouping ID
@@ -512,7 +512,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test grouping sets, should be bucketed and sorted on key, value, grouping_key
@@ -571,6 +571,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key, value, grouping_key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
index 5ebcf41..4ca6517 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
@@ -361,7 +361,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test SMB join doesn't affect inference, should not be bucketed or sorted
@@ -640,6 +640,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
index 97cb92d..53407c5 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
@@ -117,6 +117,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 2
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
index e59026e..d4c22f4 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
@@ -172,7 +172,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2')
@@ -211,7 +211,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- The first partition should be bucketed and sorted, the second should not
@@ -272,7 +272,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2')
@@ -372,7 +372,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2')
@@ -411,6 +411,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
index 91466f1..1e4db29 100644
--- ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
+++ ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
@@ -64,7 +64,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join, should be bucketed and sorted by join key
@@ -117,7 +117,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys
@@ -170,7 +170,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [key, value]
-Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key
@@ -223,7 +223,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key
@@ -276,7 +276,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 16
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
@@ -331,6 +331,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
diff --git ql/src/test/results/clientpositive/orc_analyze.q.out ql/src/test/results/clientpositive/orc_analyze.q.out
index 9b7e7b7..87855fa 100644
--- ql/src/test/results/clientpositive/orc_analyze.q.out
+++ ql/src/test/results/clientpositive/orc_analyze.q.out
@@ -917,7 +917,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -960,7 +960,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
@@ -1015,7 +1015,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1058,7 +1058,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
@@ -1113,7 +1113,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1156,7 +1156,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: drop table orc_create_people
@@ -1262,7 +1262,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1305,7 +1305,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: drop table orc_create_people
diff --git ql/src/test/results/clientpositive/partition_coltype_literals.q.out ql/src/test/results/clientpositive/partition_coltype_literals.q.out
index 1a93b7a..bc159eb 100644
--- ql/src/test/results/clientpositive/partition_coltype_literals.q.out
+++ ql/src/test/results/clientpositive/partition_coltype_literals.q.out
@@ -154,7 +154,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- rename partition
@@ -205,7 +205,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:0, nullOrder:1)]
+Sort Columns: [Order(col:key, order:0)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- insert partition
diff --git ql/src/test/results/clientpositive/show_create_table_alter.q.out ql/src/test/results/clientpositive/show_create_table_alter.q.out
index 78a34de..32819ea 100644
--- ql/src/test/results/clientpositive/show_create_table_alter.q.out
+++ ql/src/test/results/clientpositive/show_create_table_alter.q.out
@@ -24,7 +24,7 @@ CREATE EXTERNAL TABLE `tmp_showcrt1`(
CLUSTERED BY (
key)
SORTED BY (
- value DESC NULLS LAST)
+ value DESC)
INTO 5 BUCKETS
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
@@ -59,7 +59,7 @@ COMMENT 'temporary table'
CLUSTERED BY (
key)
SORTED BY (
- value DESC NULLS LAST)
+ value DESC)
INTO 5 BUCKETS
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
@@ -98,7 +98,7 @@ COMMENT 'changed comment'
CLUSTERED BY (
key)
SORTED BY (
- value DESC NULLS LAST)
+ value DESC)
INTO 5 BUCKETS
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
@@ -136,7 +136,7 @@ COMMENT 'changed comment'
CLUSTERED BY (
key)
SORTED BY (
- value DESC NULLS LAST)
+ value DESC)
INTO 5 BUCKETS
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
@@ -174,7 +174,7 @@ COMMENT 'changed comment'
CLUSTERED BY (
key)
SORTED BY (
- value DESC NULLS LAST)
+ value DESC)
INTO 5 BUCKETS
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
diff --git ql/src/test/results/clientpositive/spark/bucket5.q.out ql/src/test/results/clientpositive/spark/bucket5.q.out
index af6d399..5baf054 100644
--- ql/src/test/results/clientpositive/spark/bucket5.q.out
+++ ql/src/test/results/clientpositive/spark/bucket5.q.out
@@ -391,7 +391,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 2
Bucket Columns: [key]
-Sort Columns: [Order(col:key, order:1, nullOrder:0)]
+Sort Columns: [Order(col:key, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: SELECT * FROM bucketed_table TABLESAMPLE (BUCKET 1 OUT OF 2) s LIMIT 10
diff --git ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
index 6db9428..33d795b 100644
--- ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
+++ ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
@@ -64,7 +64,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 3
Bucket Columns: [value]
-Sort Columns: [Order(col:value, order:1, nullOrder:0)]
+Sort Columns: [Order(col:value, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is
diff --git ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index f7a94ac..a90e3f6 100644
--- ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -1215,7 +1215,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1256,7 +1256,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(*) from over1k_part_orc
@@ -2255,7 +2255,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2296,7 +2296,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain select * from over1k_part_buck_sort2_orc
@@ -2471,7 +2471,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2512,7 +2512,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: explain select * from over1k_part_buck_sort2_orc
diff --git ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index 01dc4ae..5292106 100644
--- ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -1128,7 +1128,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1169,7 +1169,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select count(*) from over1k_part
@@ -2152,7 +2152,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2193,7 +2193,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select * from over1k_part_buck_sort2
@@ -2295,7 +2295,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2336,7 +2336,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: 1
Bucket Columns: [si]
-Sort Columns: [Order(col:f, order:1, nullOrder:0)]
+Sort Columns: [Order(col:f, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: select * from over1k_part_buck_sort2
diff --git ql/src/test/results/clientpositive/tez/orc_analyze.q.out ql/src/test/results/clientpositive/tez/orc_analyze.q.out
index 9b7e7b7..87855fa 100644
--- ql/src/test/results/clientpositive/tez/orc_analyze.q.out
+++ ql/src/test/results/clientpositive/tez/orc_analyze.q.out
@@ -917,7 +917,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -960,7 +960,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
@@ -1015,7 +1015,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1058,7 +1058,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
@@ -1113,7 +1113,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1156,7 +1156,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: drop table orc_create_people
@@ -1262,7 +1262,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1305,7 +1305,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
Compressed: No
Num Buckets: 4
Bucket Columns: [first_name]
-Sort Columns: [Order(col:last_name, order:1, nullOrder:0)]
+Sort Columns: [Order(col:last_name, order:1)]
Storage Desc Params:
serialization.format 1
PREHOOK: query: drop table orc_create_people
diff --git serde/if/serde.thrift serde/if/serde.thrift
index 0b3804d..6caad36 100644
--- serde/if/serde.thrift
+++ serde/if/serde.thrift
@@ -30,7 +30,7 @@ const string SERIALIZATION_NULL_FORMAT = "serialization.null.format"
const string SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf"
const string SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest"
const string SERIALIZATION_SORT_ORDER = "serialization.sort.order"
-const string SERIALIZATION_NULL_POSITION = "serialization.null.position";
+const string SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null";
const string SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"
const string SERIALIZATION_ENCODING = "serialization.encoding"
diff --git serde/src/gen/thrift/gen-cpp/serde_constants.cpp serde/src/gen/thrift/gen-cpp/serde_constants.cpp
index 75701e2..3a675bf 100644
--- serde/src/gen/thrift/gen-cpp/serde_constants.cpp
+++ serde/src/gen/thrift/gen-cpp/serde_constants.cpp
@@ -27,7 +27,7 @@ serdeConstants::serdeConstants() {
SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf";
SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest";
SERIALIZATION_SORT_ORDER = "serialization.sort.order";
- SERIALIZATION_NULL_POSITION = "serialization.null.position";
+ SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null";
SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object";
SERIALIZATION_ENCODING = "serialization.encoding";
FIELD_DELIM = "field.delim";
diff --git serde/src/gen/thrift/gen-cpp/serde_constants.h serde/src/gen/thrift/gen-cpp/serde_constants.h
index 6d85928..a5f33fb 100644
--- serde/src/gen/thrift/gen-cpp/serde_constants.h
+++ serde/src/gen/thrift/gen-cpp/serde_constants.h
@@ -23,7 +23,7 @@ class serdeConstants {
std::string SERIALIZATION_ESCAPE_CRLF;
std::string SERIALIZATION_LAST_COLUMN_TAKES_REST;
std::string SERIALIZATION_SORT_ORDER;
- std::string SERIALIZATION_NULL_POSITION;
+ std::string SERIALIZATION_NULL_SORT_ORDER;
std::string SERIALIZATION_USE_JSON_OBJECTS;
std::string SERIALIZATION_ENCODING;
std::string FIELD_DELIM;
diff --git serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
index 0bc6dd7..18c3991 100644
--- serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
+++ serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
@@ -26,7 +26,7 @@ final class Constant extends \Thrift\Type\TConstant {
static protected $SERIALIZATION_ESCAPE_CRLF;
static protected $SERIALIZATION_LAST_COLUMN_TAKES_REST;
static protected $SERIALIZATION_SORT_ORDER;
- static protected $SERIALIZATION_NULL_POSITION;
+ static protected $SERIALIZATION_NULL_SORT_ORDER;
static protected $SERIALIZATION_USE_JSON_OBJECTS;
static protected $SERIALIZATION_ENCODING;
static protected $FIELD_DELIM;
@@ -98,8 +98,8 @@ final class Constant extends \Thrift\Type\TConstant {
return "serialization.sort.order";
}
- static protected function init_SERIALIZATION_NULL_POSITION() {
- return "serialization.null.position";
+ static protected function init_SERIALIZATION_NULL_SORT_ORDER() {
+ return "serialization.sort.order.null";
}
static protected function init_SERIALIZATION_USE_JSON_OBJECTS() {
diff --git serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
index 7939791..fafdc24 100644
--- serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
+++ serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
@@ -17,7 +17,7 @@
SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf"
SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest"
SERIALIZATION_SORT_ORDER = "serialization.sort.order"
-SERIALIZATION_NULL_POSITION = "serialization.null.position"
+SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null"
SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"
SERIALIZATION_ENCODING = "serialization.encoding"
FIELD_DELIM = "field.delim"
diff --git serde/src/gen/thrift/gen-rb/serde_constants.rb serde/src/gen/thrift/gen-rb/serde_constants.rb
index d09e3c2..0ce9f27 100644
--- serde/src/gen/thrift/gen-rb/serde_constants.rb
+++ serde/src/gen/thrift/gen-rb/serde_constants.rb
@@ -23,7 +23,7 @@ SERIALIZATION_LAST_COLUMN_TAKES_REST = %q"serialization.last.column.takes.rest"
SERIALIZATION_SORT_ORDER = %q"serialization.sort.order"
-SERIALIZATION_NULL_POSITION = %q"serialization.null.position"
+SERIALIZATION_NULL_SORT_ORDER = %q"serialization.sort.order.null"
SERIALIZATION_USE_JSON_OBJECTS = %q"serialization.use.json.object"