diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index 2d0a1e3..691c6b4 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -159,6 +159,7 @@ minitez.query.files.shared=acid_globallimit.q,\
   metadata_only_queries.q,\
   metadata_only_queries_with_filters.q,\
   nonmr_fetch_threshold.q,\
+  order_null.q,\
   optimize_nullscan.q,\
   orc_analyze.q,\
   orc_merge1.q,\
diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift
index 1abcfe5..b9fc85a 100755
--- metastore/if/hive_metastore.thrift
+++ metastore/if/hive_metastore.thrift
@@ -226,8 +226,9 @@ struct SerDeInfo {
 
 // sort order of a column (column name along with asc(1)/desc(0))
 struct Order {
-  1: string col,  // sort column name
-  2: i32 order    // asc(1) or desc(0)
+  1: string col,      // sort column name
+  2: i32    order,    // asc(1) or desc(0)
+  3: i32    nullOrder // nulls first(0) or nulls last(1)
 }
 
 // this object holds all the information about skewed table
diff --git metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
new file mode 100644
index 0000000..a8b48bf
--- /dev/null
+++ metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
@@ -0,0 +1,2 @@
+ALTER TABLE "APP".SORT_COLS ADD "NULL_ORDER" INTEGER NOT NULL DEFAULT 0;
+UPDATE "APP".SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;
diff --git metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
index 42f4eb6..00c49ae 100644
--- metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
+++ metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
@@ -28,7 +28,7 @@ CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCH
 
 CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(1000) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
 
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "NULL_ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
 
 CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
 
diff --git metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
index a0bac3c..13aeabb 100644
--- metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
+++ metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
@@ -1,4 +1,5 @@
 -- Upgrade MetaStore schema from 2.0.0 to 2.1.0
 RUN '033-HIVE-12892.derby.sql';
+RUN '034-HIVE-12994.derby.sql';
 
 UPDATE "APP".VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
index 416ae9d..ca928b9 100644
--- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
+++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/Metastore.java
@@ -447,15 +447,15 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfo ot
 
       public final boolean isInitialized() {
         if (!hasOffset()) {
-          
+
          return false;
        }
        if (!hasLength()) {
-          
+
          return false;
        }
        if (!hasIndex()) {
-          
+
          return false;
        }
        return true;
@@ -597,7 +597,7 @@ public Builder clearIndex() {
     /**
      * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo> 
+    java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo>
         getInfosList();
     /**
      * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
      */
@@ -610,7 +610,7 @@ public Builder clearIndex() {
     /**
      * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> 
+    java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
        getInfosOrBuilderList();
     /**
      * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
      */
@@ -731,7 +731,7 @@ public SplitInfos parsePartialFrom(
     /**
      * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> 
+    public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
        getInfosOrBuilderList() {
      return infos_;
    }
@@ -984,7 +984,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfos o
            infosBuilder_ = null;
            infos_ = other.infos_;
            bitField0_ = (bitField0_ & ~0x00000001);
-            infosBuilder_ = 
+            infosBuilder_ =
              com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                 getInfosFieldBuilder() : null;
          } else {
@@ -999,7 +999,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfos o
      public final boolean isInitialized() {
        for (int i = 0; i < getInfosCount(); i++) {
          if (!getInfos(i).isInitialized()) {
-            
+
            return false;
          }
        }
@@ -1220,7 +1220,7 @@ public Builder removeInfos(int index) {
      /**
       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
       */
-      public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> 
+      public java.util.List<? extends org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
          getInfosOrBuilderList() {
        if (infosBuilder_ != null) {
          return infosBuilder_.getMessageOrBuilderList();
@@ -1246,12 +1246,12 @@ public Builder removeInfos(int index) {
      /**
       * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
       */
-      public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder> 
+      public java.util.List<org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder>
          getInfosBuilderList() {
        return getInfosFieldBuilder().getBuilderList();
      }
      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder> 
+          org.apache.hadoop.hive.metastore.Metastore.SplitInfo, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder, org.apache.hadoop.hive.metastore.Metastore.SplitInfoOrBuilder>
          getInfosFieldBuilder() {
        if (infosBuilder_ == null) {
          infosBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
index 3b2d7b5..3057fff 100644
--- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
+++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
@@ -22582,6 +22582,16 @@ public StorageDescriptor parsePartialFrom(
       * <code>optional sint32 order = 2 [default = 1];</code>
       */
      int getOrder();
+
+      // optional sint32 nullOrder = 3 [default = 0];
+      /**
+       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+       */
+      boolean hasNullOrder();
+      /**
+       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+       */
+      int getNullOrder();
    }
    /**
     * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order}
     */
@@ -22644,6 +22654,11 @@ private Order(
              order_ = input.readSInt32();
              break;
            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              nullOrder_ = input.readSInt32();
+              break;
+            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -22743,9 +22758,26 @@ public int getOrder() {
        return order_;
      }
 
+      // optional sint32 nullOrder = 3 [default = 0];
+      public static final int NULLORDER_FIELD_NUMBER = 3;
+      private int nullOrder_;
+      /**
+       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+       */
+      public boolean hasNullOrder() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+       */
+      public int getNullOrder() {
+        return nullOrder_;
+      }
+
      private void initFields() {
        columnName_ = "";
        order_ = 1;
+        nullOrder_ = 0;
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
@@ -22769,6 +22801,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output)
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeSInt32(2, order_);
        }
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          output.writeSInt32(3, nullOrder_);
+        }
        getUnknownFields().writeTo(output);
      }
 
@@ -22786,6 +22821,10 @@ public int getSerializedSize() {
          size += com.google.protobuf.CodedOutputStream
            .computeSInt32Size(2, order_);
        }
+        if (((bitField0_ & 0x00000004) == 0x00000004)) {
+          size += com.google.protobuf.CodedOutputStream
+            .computeSInt32Size(3, nullOrder_);
+        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
@@ -22906,6 +22945,8 @@ public Builder clear() {
          bitField0_ = (bitField0_ & ~0x00000001);
          order_ = 1;
          bitField0_ = (bitField0_ & ~0x00000002);
+          nullOrder_ = 0;
+          bitField0_ = (bitField0_ & ~0x00000004);
          return this;
        }
 
@@ -22942,6 +22983,10 @@ public Builder clone() {
            to_bitField0_ |= 0x00000002;
          }
          result.order_ = order_;
+          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+            to_bitField0_ |= 0x00000004;
+          }
+          result.nullOrder_ = nullOrder_;
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
@@ -22966,6 +23011,9 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr
          if (other.hasOrder()) {
            setOrder(other.getOrder());
          }
+          if (other.hasNullOrder()) {
+            setNullOrder(other.getNullOrder());
+          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }
@@ -23104,6 +23152,39 @@ public Builder clearOrder() {
          return this;
        }
 
+        // optional sint32 nullOrder = 3 [default = 0];
+        private int nullOrder_ ;
+        /**
+         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+         */
+        public boolean hasNullOrder() {
+          return ((bitField0_ & 0x00000004) == 0x00000004);
+        }
+        /**
+         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+         */
+        public int getNullOrder() {
+          return nullOrder_;
+        }
+        /**
+         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+         */
+        public Builder setNullOrder(int value) {
+          bitField0_ |= 0x00000004;
+          nullOrder_ = value;
+          onChanged();
+          return this;
+        }
+        /**
+         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
+         */
+        public Builder clearNullOrder() {
+          bitField0_ = (bitField0_ & ~0x00000004);
+          nullOrder_ = 0;
+          onChanged();
+          return this;
+        }
+
        // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order)
      }
@@ -34747,7 +34828,7 @@ public Builder removeRange(int index) {
      "ant_info\030\001 \003(\01325.org.apache.hadoop.hive." +
      "metastore.hbase.RoleGrantInfo\"\030\n\010RoleLis",
      "t\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013create_time\030\001" +
-      " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021StorageDesc" +
+      " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\302\010\n\021StorageDesc" +
      "riptor\022A\n\004cols\030\001 \003(\01323.org.apache.hadoop" +
      ".hive.metastore.hbase.FieldSchema\022\024\n\014inp" +
      "ut_format\030\002 \001(\t\022\025\n\routput_format\030\003 \001(\t\022\025" +
@@ -34760,51 +34841,51 @@ public Builder removeRange(int index) {
      "skewed_info\030\t \001(\0132D.org.apache.hadoop.hi" +
      "ve.metastore.hbase.StorageDescriptor.Ske" +
      "wedInfo\022!\n\031stored_as_sub_directories\030\n \001" +
-      "(\010\032.\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" +
-      "r\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004name\030\001 \001(\t\022\031\n" +
-      "\021serialization_lib\030\002 \001(\t\022F\n\nparameters\030\003" +
-      " \001(\01322.org.apache.hadoop.hive.metastore." +
-      "hbase.Parameters\032\214\003\n\nSkewedInfo\022\030\n\020skewe" +
-      "d_col_names\030\001 \003(\t\022r\n\021skewed_col_values\030\002",
-      " \003(\0132W.org.apache.hadoop.hive.metastore." +
-      "hbase.StorageDescriptor.SkewedInfo.Skewe" +
-      "dColValueList\022\206\001\n\036skewed_col_value_locat" +
-      "ion_maps\030\003 \003(\0132^.org.apache.hadoop.hive." +
-      "metastore.hbase.StorageDescriptor.Skewed" +
-      "Info.SkewedColValueLocationMap\032.\n\022Skewed" +
-      "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" +
-      "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" +
-      "\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t" +
-      "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim",
-      "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" +
-      " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" +
-      ".hadoop.hive.metastore.hbase.Parameters\022" +
-      "\017\n\007sd_hash\030\007 \002(\014\022K\n\016partition_keys\030\010 \003(\013" +
-      "23.org.apache.hadoop.hive.metastore.hbas" +
-      "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." +
-      "apache.hadoop.hive.metastore.hbase.Param" +
-      "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" +
-      "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" +
-      "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo",
-      "p.hive.metastore.hbase.PrincipalPrivileg" +
-      "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\353\004\n\026Partition" +
-      "KeyComparator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 " +
-      "\002(\t\022S\n\002op\030\003 \003(\0132G.org.apache.hadoop.hive" +
-      ".metastore.hbase.PartitionKeyComparator." +
-      "Operator\022S\n\005range\030\004 \003(\0132D.org.apache.had" +
-      "oop.hive.metastore.hbase.PartitionKeyCom" +
-      "parator.Range\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\t" +
-      "inclusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R" +
-      "\n\005start\030\002 \001(\0132C.org.apache.hadoop.hive.m",
-      "etastore.hbase.PartitionKeyComparator.Ma" +
-      "rk\022P\n\003end\030\003 \001(\0132C.org.apache.hadoop.hive" +
-      ".metastore.hbase.PartitionKeyComparator." +
-      "Mark\032\241\001\n\010Operator\022Z\n\004type\030\001 \002(\0162L.org.ap" +
-      "ache.hadoop.hive.metastore.hbase.Partiti" +
-      "onKeyComparator.Operator.Type\022\013\n\003key\030\002 \002" +
-      "(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOT" +
-      "EQUALS\020\001*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004R" +
-      "OLE\020\001"
+      "(\010\032D\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" +
+      "r\030\002 \001(\021:\0011\022\024\n\tnullOrder\030\003 \001(\021:\0010\032|\n\tSerD" +
+      "eInfo\022\014\n\004name\030\001 \001(\t\022\031\n\021serialization_lib" +
+      "\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.org.apache." +
+      "hadoop.hive.metastore.hbase.Parameters\032\214" +
+      "\003\n\nSkewedInfo\022\030\n\020skewed_col_names\030\001 \003(\t\022",
+      "r\n\021skewed_col_values\030\002 \003(\0132W.org.apache." +
+      "hadoop.hive.metastore.hbase.StorageDescr" +
+      "iptor.SkewedInfo.SkewedColValueList\022\206\001\n\036" +
+      "skewed_col_value_location_maps\030\003 \003(\0132^.o" +
+      "rg.apache.hadoop.hive.metastore.hbase.St" +
+      "orageDescriptor.SkewedInfo.SkewedColValu" +
+      "eLocationMap\032.\n\022SkewedColValueList\022\030\n\020sk" +
+      "ewed_col_value\030\001 \003(\t\0327\n\031SkewedColValueLo" +
+      "cationMap\022\013\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004" +
+      "\n\005Table\022\r\n\005owner\030\001 \001(\t\022\023\n\013create_time\030\002 ",
+      "\001(\003\022\030\n\020last_access_time\030\003 \001(\003\022\021\n\tretenti" +
+      "on\030\004 \001(\003\022\020\n\010location\030\005 \001(\t\022I\n\rsd_paramet" +
+      "ers\030\006 \001(\01322.org.apache.hadoop.hive.metas" +
+      "tore.hbase.Parameters\022\017\n\007sd_hash\030\007 \002(\014\022K" +
+      "\n\016partition_keys\030\010 \003(\01323.org.apache.hado" +
+      "op.hive.metastore.hbase.FieldSchema\022F\n\np" +
+      "arameters\030\t \001(\01322.org.apache.hadoop.hive" +
+      ".metastore.hbase.Parameters\022\032\n\022view_orig" +
+      "inal_text\030\n \001(\t\022\032\n\022view_expanded_text\030\013 " +
+      "\001(\t\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivileges\030\r ",
+      "\001(\0132=.org.apache.hadoop.hive.metastore.h" +
+      "base.PrincipalPrivilegeSet\022\024\n\014is_tempora" +
+      "ry\030\016 \001(\010\"\353\004\n\026PartitionKeyComparator\022\r\n\005n" +
+      "ames\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G." +
+      "org.apache.hadoop.hive.metastore.hbase.P" +
+      "artitionKeyComparator.Operator\022S\n\005range\030" +
+      "\004 \003(\0132D.org.apache.hadoop.hive.metastore" +
+      ".hbase.PartitionKeyComparator.Range\032(\n\004M" +
+      "ark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001" +
+      "\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.or",
+      "g.apache.hadoop.hive.metastore.hbase.Par" +
+      "titionKeyComparator.Mark\022P\n\003end\030\003 \001(\0132C." +
+      "org.apache.hadoop.hive.metastore.hbase.P" +
+      "artitionKeyComparator.Mark\032\241\001\n\010Operator\022" +
+      "Z\n\004type\030\001 \002(\0162L.org.apache.hadoop.hive.m" +
+      "etastore.hbase.PartitionKeyComparator.Op" +
+      "erator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n" +
+      "\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*#\n\rPrinci" +
+      "palType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001"
    };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
        new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -34990,7 +35071,7 @@ public Builder removeRange(int index) {
    internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable = new
      com.google.protobuf.GeneratedMessage.FieldAccessorTable(
        internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor,
-        new java.lang.String[] { "ColumnName", "Order", });
+        new java.lang.String[] { "ColumnName", "Order", "NullOrder", });
    internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor =
      internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(1);
    internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable = new
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 2695ffa..6534d68 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -3101,6 +3101,10 @@ void Order::__set_order(const int32_t val) {
   this->order = val;
 }
 
+void Order::__set_nullOrder(const int32_t val) {
+  this->nullOrder = val;
+}
+
 uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -3138,6 +3142,14 @@ uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
          xfer += iprot->skip(ftype);
        }
        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->nullOrder);
+          this->__isset.nullOrder = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
      default:
        xfer += iprot->skip(ftype);
        break;
@@ -3163,6 +3175,10 @@ uint32_t Order::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeI32(this->order);
   xfer += oprot->writeFieldEnd();
 
+  xfer += oprot->writeFieldBegin("nullOrder", ::apache::thrift::protocol::T_I32, 3);
+  xfer += oprot->writeI32(this->nullOrder);
+  xfer += oprot->writeFieldEnd();
+
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -3172,17 +3188,20 @@ void swap(Order &a, Order &b) {
   using ::std::swap;
   swap(a.col, b.col);
   swap(a.order, b.order);
+  swap(a.nullOrder, b.nullOrder);
   swap(a.__isset, b.__isset);
 }
 
 Order::Order(const Order& other139) {
   col = other139.col;
   order = other139.order;
+  nullOrder = other139.nullOrder;
   __isset = other139.__isset;
 }
 Order& Order::operator=(const Order& other140) {
   col = other140.col;
   order = other140.order;
+  nullOrder = other140.nullOrder;
   __isset = other140.__isset;
   return *this;
 }
@@ -3191,6 +3210,7 @@ void Order::printTo(std::ostream& out) const {
   out << "Order(";
   out << "col=" << to_string(col);
   out << ", " << "order=" << to_string(order);
+  out << ", " << "nullOrder=" << to_string(nullOrder);
   out << ")";
 }
 
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 97c07a5..3fd2543 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -1575,9 +1575,10 @@ inline std::ostream& operator<<(std::ostream& out, const SerDeInfo& obj)
 }
 
 typedef struct _Order__isset {
-  _Order__isset() : col(false), order(false) {}
+  _Order__isset() : col(false), order(false), nullOrder(false) {}
   bool col :1;
   bool order :1;
+  bool nullOrder :1;
 } _Order__isset;
 
 class Order {
@@ -1585,12 +1586,13 @@ class Order {
 
   Order(const Order&);
   Order& operator=(const Order&);
-  Order() : col(), order(0) {
+  Order() : col(), order(0), nullOrder(0) {
   }
 
   virtual ~Order() throw();
   std::string col;
   int32_t order;
+  int32_t nullOrder;
 
   _Order__isset __isset;
@@ -1598,12 +1600,16 @@ class Order {
 
   void __set_order(const int32_t val);
 
+  void __set_nullOrder(const int32_t val);
+
   bool operator == (const Order & rhs) const
   {
     if (!(col == rhs.col))
       return false;
     if (!(order == rhs.order))
       return false;
+    if (!(nullOrder == rhs.nullOrder))
+      return false;
     return true;
   }
   bool operator != (const Order &rhs) const {
diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
index cc0e2dd..fd05de5 100644
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
@@ -40,6 +40,7 @@
 
   private static final org.apache.thrift.protocol.TField COL_FIELD_DESC = new org.apache.thrift.protocol.TField("col", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField NULL_ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("nullOrder", org.apache.thrift.protocol.TType.I32, (short)3);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -49,11 +50,13 @@
 
   private String col; // required
   private int order; // required
+  private int nullOrder; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     COL((short)1, "col"),
-    ORDER((short)2, "order");
+    ORDER((short)2, "order"),
+    NULL_ORDER((short)3, "nullOrder");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) {
          return COL;
        case 2: // ORDER
          return ORDER;
+        case 3: // NULL_ORDER
+          return NULL_ORDER;
        default:
          return null;
      }
@@ -113,6 +118,7 @@ public String getFieldName() {
 
   // isset id assignments
   private static final int __ORDER_ISSET_ID = 0;
+  private static final int __NULLORDER_ISSET_ID = 1;
   private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
@@ -121,6 +127,8 @@ public String getFieldName() {
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
    tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.DEFAULT,
        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.NULL_ORDER, new org.apache.thrift.meta_data.FieldMetaData("nullOrder", org.apache.thrift.TFieldRequirementType.DEFAULT,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
    metaDataMap = Collections.unmodifiableMap(tmpMap);
    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Order.class, metaDataMap);
  }
@@ -130,12 +138,15 @@ public Order() {
 
   public Order(
     String col,
-    int order)
+    int order,
+    int nullOrder)
   {
     this();
     this.col = col;
     this.order = order;
     setOrderIsSet(true);
+    this.nullOrder = nullOrder;
+    setNullOrderIsSet(true);
   }
 
   /**
@@ -147,6 +158,7 @@ public Order(Order other) {
      this.col = other.col;
    }
    this.order = other.order;
+    this.nullOrder = other.nullOrder;
  }
 
   public Order deepCopy() {
@@ -158,6 +170,8 @@ public void clear() {
    this.col = null;
    setOrderIsSet(false);
    this.order = 0;
+    setNullOrderIsSet(false);
+    this.nullOrder = 0;
  }
 
   public String getCol() {
@@ -205,6 +219,28 @@ public void setOrderIsSet(boolean value) {
    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ORDER_ISSET_ID, value);
  }
 
+  public int getNullOrder() {
+    return this.nullOrder;
+  }
+
+  public void setNullOrder(int nullOrder) {
+    this.nullOrder = nullOrder;
+    setNullOrderIsSet(true);
+  }
+
+  public void unsetNullOrder() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NULLORDER_ISSET_ID);
+  }
+
+  /** Returns true if field nullOrder is set (has been assigned a value) and false otherwise */
+  public boolean isSetNullOrder() {
+    return EncodingUtils.testBit(__isset_bitfield, __NULLORDER_ISSET_ID);
+  }
+
+  public void setNullOrderIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NULLORDER_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case COL:
@@ -223,6 +259,14 @@ public void setFieldValue(_Fields field, Object value) {
      }
      break;
 
+    case NULL_ORDER:
+      if (value == null) {
+        unsetNullOrder();
+      } else {
+        setNullOrder((Integer)value);
+      }
+      break;
+
    }
  }
@@ -234,6 +278,9 @@ public Object getFieldValue(_Fields field) {
    case ORDER:
      return getOrder();
 
+    case NULL_ORDER:
+      return getNullOrder();
+
    }
    throw new IllegalStateException();
  }
@@ -249,6 +296,8 @@ public boolean isSet(_Fields field) {
      return isSetCol();
    case ORDER:
      return isSetOrder();
+    case NULL_ORDER:
+      return isSetNullOrder();
    }
    throw new IllegalStateException();
  }
@@ -284,6 +333,15 @@ public boolean equals(Order that) {
        return false;
    }
 
+    boolean this_present_nullOrder = true;
+    boolean that_present_nullOrder = true;
+    if (this_present_nullOrder || that_present_nullOrder) {
+      if (!(this_present_nullOrder && that_present_nullOrder))
+        return false;
+      if (this.nullOrder != that.nullOrder)
+        return false;
+    }
+
    return true;
  }
@@ -301,6 +359,11 @@ public int hashCode() {
    if (present_order)
      list.add(order);
 
+    boolean present_nullOrder = true;
+    list.add(present_nullOrder);
+    if (present_nullOrder)
+      list.add(nullOrder);
+
    return list.hashCode();
  }
@@ -332,6 +395,16 @@ public int compareTo(Order other) {
        return lastComparison;
      }
    }
+    lastComparison = Boolean.valueOf(isSetNullOrder()).compareTo(other.isSetNullOrder());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNullOrder()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nullOrder, other.nullOrder);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
    return 0;
  }
@@ -363,6 +436,10 @@ public String toString() {
    sb.append("order:");
    sb.append(this.order);
    first = false;
+    if (!first) sb.append(", ");
+    sb.append("nullOrder:");
+    sb.append(this.nullOrder);
+    first = false;
    sb.append(")");
    return sb.toString();
  }
@@ -424,6 +501,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Order struct) throw
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
            }
            break;
+          case 3: // NULL_ORDER
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.nullOrder = iprot.readI32();
+              struct.setNullOrderIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
          default:
            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
@@ -445,6 +530,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Order struct) thro
      oprot.writeFieldBegin(ORDER_FIELD_DESC);
      oprot.writeI32(struct.order);
      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NULL_ORDER_FIELD_DESC);
+      oprot.writeI32(struct.nullOrder);
+      oprot.writeFieldEnd();
      oprot.writeFieldStop();
      oprot.writeStructEnd();
    }
@@ -469,19 +557,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Order struct) throw
      if (struct.isSetOrder()) {
        optionals.set(1);
      }
-      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetNullOrder()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
      if (struct.isSetCol()) {
        oprot.writeString(struct.col);
      }
      if (struct.isSetOrder()) {
        oprot.writeI32(struct.order);
      }
+      if (struct.isSetNullOrder()) {
+        oprot.writeI32(struct.nullOrder);
+      }
    }
 
    @Override
    public void read(org.apache.thrift.protocol.TProtocol prot, Order struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(2);
+      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        struct.col = iprot.readString();
        struct.setColIsSet(true);
@@ -490,6 +584,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Order struct) throws
        struct.order = iprot.readI32();
        struct.setOrderIsSet(true);
      }
+      if (incoming.get(2)) {
+        struct.nullOrder = iprot.readI32();
+        struct.setNullOrderIsSet(true);
+      }
    }
  }
 
diff --git metastore/src/gen/thrift/gen-php/metastore/Types.php metastore/src/gen/thrift/gen-php/metastore/Types.php
index 488a920..4da4707 100644
--- metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -3076,6 +3076,10 @@ class Order {
   * @var int
   */
  public $order = null;
+  /**
+   * @var int
+   */
+  public $nullOrder = null;
 
  public function __construct($vals=null) {
    if (!isset(self::$_TSPEC)) {
@@ -3088,6 +3092,10 @@ class Order {
          'var' => 'order',
          'type' => TType::I32,
          ),
+        3 => array(
+          'var' => 'nullOrder',
+          'type' => TType::I32,
+          ),
        );
    }
    if (is_array($vals)) {
@@ -3097,6 +3105,9 @@ class Order {
      if (isset($vals['order'])) {
        $this->order = $vals['order'];
      }
+      if (isset($vals['nullOrder'])) {
+        $this->nullOrder = $vals['nullOrder'];
+      }
    }
  }
 
@@ -3133,6 +3144,13 @@ class Order {
            $xfer += $input->skip($ftype);
          }
          break;
+        case 3:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->nullOrder);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
        default:
          $xfer += $input->skip($ftype);
          break;
@@ -3156,6 +3174,11 @@ class Order {
      $xfer += $output->writeI32($this->order);
      $xfer += $output->writeFieldEnd();
    }
+    if ($this->nullOrder !== null) {
+      $xfer += $output->writeFieldBegin('nullOrder', TType::I32, 3);
+      $xfer += $output->writeI32($this->nullOrder);
+      $xfer += $output->writeFieldEnd();
+    }
    $xfer += $output->writeFieldStop();
    $xfer += $output->writeStructEnd();
    return $xfer;
diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 10eaf4a..4b20da9 100644
--- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -2237,17 +2237,20 @@ class Order:
   Attributes:
    - col
    - order
+   - nullOrder
   """
 
   thrift_spec = (
     None, # 0
     (1, TType.STRING, 'col', None, None, ), # 1
     (2, TType.I32, 'order', None, None, ), # 2
+    (3, TType.I32, 'nullOrder', None, None, ), # 3
   )
 
-  def __init__(self, col=None, order=None,):
+  def __init__(self, col=None, order=None, nullOrder=None,):
     self.col = col
     self.order = order
+    self.nullOrder = nullOrder
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -2268,6 +2271,11 @@ def read(self, iprot):
          self.order = iprot.readI32()
        else:
          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I32:
+          self.nullOrder = iprot.readI32()
+        else:
+          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
@@ -2286,6 +2294,10 @@ def write(self, oprot):
      oprot.writeFieldBegin('order', TType.I32, 2)
      oprot.writeI32(self.order)
      oprot.writeFieldEnd()
+    if self.nullOrder is not None:
+      oprot.writeFieldBegin('nullOrder', TType.I32, 3)
+      oprot.writeI32(self.nullOrder)
+      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
 
@@ -2297,6 +2309,7 @@ def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.col)
    value = (value * 31) ^ hash(self.order)
+    value = (value * 31) ^ hash(self.nullOrder)
    return value
 
   def __repr__(self):
diff --git metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 1cf40ae..556c380 100644
--- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -557,10 +557,12 @@ class Order
   include ::Thrift::Struct, ::Thrift::Struct_Union
   COL = 1
   ORDER = 2
+  NULLORDER = 3
 
   FIELDS = {
     COL => {:type => ::Thrift::Types::STRING, :name => 'col'},
-    ORDER => {:type => ::Thrift::Types::I32, :name => 'order'}
+    ORDER => {:type => ::Thrift::Types::I32, :name => 'order'},
+    NULLORDER => {:type => ::Thrift::Types::I32, :name => 'nullOrder'}
   }
 
   def struct_fields; FIELDS; end
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 6da295e..3b31ee1 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -672,14 +672,16 @@ public void apply(StorageDescriptor t, Object[] fields) {
        t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
      }
 
-    queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\" from \"SORT_COLS\""
+    queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\", \"SORT_COLS\".\"NULL_ORDER\""
+        + " from \"SORT_COLS\""
        + " where \"SD_ID\" in (" + sdIds + ") and \"INTEGER_IDX\" >= 0"
        + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
    loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
      @Override
      public void apply(StorageDescriptor t, Object[] fields) {
        if (fields[2] == null) return;
-        t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2])));
+        assert fields[3] != null;
+        t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]), extractSqlInt(fields[3])));
      }});
 
    queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from \"BUCKETING_COLS\""
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 8d05f49..dcfeba4 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1346,7 +1346,8 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException,
    if (keys != null) {
      mkeys = new ArrayList<MOrder>(keys.size());
      for (Order part : keys) {
-        mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder()));
+        mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder(),
+            part.getNullOrder()));
      }
    }
    return mkeys;
@@ -1357,7 +1358,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException,
    if (mkeys != null) {
      keys = new ArrayList<Order>(mkeys.size());
      for (MOrder part : mkeys) {
-        keys.add(new Order(part.getCol(), part.getOrder()));
+        keys.add(new Order(part.getCol(), part.getOrder(), part.getNullOrder()));
      }
    }
    return keys;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index d6d01bd..a16997b 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -707,7 +707,7 @@ static StorageDescriptor deserializeStorageDescriptor(byte[] serialized)
    sd.setBucketCols(new ArrayList<>(proto.getBucketColsList()));
    List<Order> sortCols = new ArrayList<>();
    for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) {
-      sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder()));
+      sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder(), protoOrder.getNullOrder()));
    }
    sd.setSortCols(sortCols);
    if (proto.hasSkewedInfo()) {
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
index 1fa82a4..5370c02 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
@@ -21,14 +21,16 @@ public class MOrder {
   private String col;
   private int order;
-  
+  private int nullOrder;
+
   /**
    * @param col
    * @param order
    */
-  public MOrder(String col, int order) {
+  public MOrder(String col, int order, int nullOrder) {
     this.col = col;
     this.order = order;
+    this.nullOrder = nullOrder;
   }
 
   /**
@@ -58,5 +60,19 @@ public int getOrder() {
   public void setOrder(int order) {
     this.order = order;
   }
-  
+
+  /**
+   * @return the null order
+   */
+  public int getNullOrder() {
+    return nullOrder;
+  }
+
+  /**
+   * @param nullOrder the null order to set
+   */
+  public void setNullOrder(int nullOrder) {
+    this.nullOrder = nullOrder;
+  }
+
 }
diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
index 466fdf9..552097b 100644
--- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
+++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
@@ -205,6 +205,7 @@ message StorageDescriptor {
   message Order {
     required string column_name = 1;
     optional sint32 order = 2 [default = 1];
+    optional sint32 nullOrder = 3 [default = 0];
   }
 
   message SerDeInfo {
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
index 29d5a64..d938a03 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
@@ -18,8 +18,16 @@
  */
 package org.apache.hadoop.hive.metastore.hbase;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -58,16 +66,8 @@
 import org.junit.rules.ExpectedException;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
-
-import java.io.IOException;
-import java.security.MessageDigest;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -402,7 +402,7 @@ public void createTable() throws Exception {
    Map<String, String> params = new HashMap<String, String>();
    params.put("key", "value");
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
    Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
        emptyParameters, null, null, null);
    store.createTable(table);
@@ -424,6 +424,7 @@ public void createTable() throws Exception {
    Assert.assertEquals(1, t.getSd().getSortColsSize());
    Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol());
    Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder());
+    Assert.assertEquals(0, t.getSd().getSortCols().get(0).getNullOrder());
    Assert.assertEquals(1, t.getSd().getParametersSize());
    Assert.assertEquals("value", t.getSd().getParameters().get("key"));
    Assert.assertEquals("me", t.getOwner());
@@ -1273,7 +1274,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw
    Map<String, String> params = new HashMap<String, String>();
    params.put("key", "value");
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
    int currentTime = (int)(System.currentTimeMillis() / 1000);
    Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
        emptyParameters, null, null, null);
@@ -1291,7 +1292,7 @@ private Table createMockTable(String type) throws Exception {
    Map<String, String> params = new HashMap<String, String>();
    params.put("key", "value");
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
    int currentTime = (int)(System.currentTimeMillis() / 1000);
    Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
        emptyParameters, null, null, null);
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
index b0d7662..570d023 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
@@ -18,34 +18,30 @@
  */
 package org.apache.hadoop.hive.metastore.hbase;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Decimal;
 import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -58,16 +54,8 @@
 import org.junit.rules.ExpectedException;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
-
-import java.io.IOException;
-import java.security.MessageDigest;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -595,7 +583,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw
    Map<String, String> params = new HashMap<String, String>();
    params.put("key", "value");
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
    int currentTime = (int)(System.currentTimeMillis() / 1000);
    Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
        emptyParameters, null, null, null);
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
index e0d8ce4..8e856a1 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
@@ -18,8 +18,9 @@
  */
 package org.apache.hadoop.hive.metastore.hbase;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.util.Iterator;
+import java.util.List;
+
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
@@ -27,10 +28,8 @@
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.junit.Assert;
 import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
@@ -80,19 +79,22 @@ public void changeOnUnset() {
   @Test
   public void changeOrder() {
     StorageDescriptor sd = new StorageDescriptor();
-    sd.addToSortCols(new Order("fred", 1));
+    sd.addToSortCols(new Order("fred", 1, 0));
     SharedStorageDescriptor ssd = new SharedStorageDescriptor();
     ssd.setShared(sd);
     ssd.getSortCols().get(0).setOrder(2);
+    ssd.getSortCols().get(0).setNullOrder(3);
     Assert.assertFalse(sd.getSortCols() == ssd.getSortCols());
     Assert.assertEquals(2, ssd.getSortCols().get(0).getOrder());
     Assert.assertEquals(1, sd.getSortCols().get(0).getOrder());
+    Assert.assertEquals(3, ssd.getSortCols().get(0).getNullOrder());
+    Assert.assertEquals(0, sd.getSortCols().get(0).getNullOrder());
   }
 
   @Test
   public void unsetOrder() {
     StorageDescriptor sd = new StorageDescriptor();
-    sd.addToSortCols(new Order("fred", 1));
+    sd.addToSortCols(new Order("fred", 1, 0));
     SharedStorageDescriptor ssd = new SharedStorageDescriptor();
     ssd.setShared(sd);
     ssd.unsetSortCols();
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index a81eb18..5509988 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2139,6 +2139,11 @@ private int showCreateTable(Hive db, DataOutputStream outStream, String tableNam
        else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
          sortKeyDesc = sortKeyDesc + "DESC";
        }
+        if (sortCol.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST) {
+          sortKeyDesc = sortKeyDesc + " NULLS FIRST";
+        } else if (sortCol.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_LAST) {
+          sortKeyDesc = sortKeyDesc + " NULLS LAST";
+        }
        sortKeys.add(sortKeyDesc);
      }
      tbl_sort_bucket += StringUtils.join(sortKeys, ", \n");
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 91b5ca7..3cb21d8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -648,6 +648,8 @@ protected void reloadHashTable(byte pos, int partitionId)
    spilledMapJoinTables[pos] = new MapJoinBytesTableContainer(restoredHashMap);
    spilledMapJoinTables[pos].setInternalValueOi(container.getInternalValueOi());
    spilledMapJoinTables[pos].setSortableSortOrders(container.getSortableSortOrders());
+    spilledMapJoinTables[pos].setNullMarkers(container.getNullMarkers());
+    spilledMapJoinTables[pos].setNotNullMarkers(container.getNotNullMarkers());
  }
 
  /**
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 69d85c3..02d40b1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -591,7 +591,9 @@ private void updatePartitionBucketSortColumns(Table table, Partition partn,
          newSortCols.add(new Order(
            partn.getCols().get(sortCol.getIndexes().get(0)).getName(),
            sortCol.getSortOrder() == '+' ? BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC :
-              BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC));
+              BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC,
+            sortCol.getNullSortOrder() == 'a' ? BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST :
+              BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_LAST));
        } else {
          // If the table is sorted on a partition column, not valid for sorting
          updateSortCols = false;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index fdc1dff..f6471db 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -91,6 +91,8 @@
   /** The OI used to deserialize values. We never deserialize keys. */
   private LazyBinaryStructObjectInspector internalValueOi;
   private boolean[] sortableSortOrders;
+  private byte[] nullMarkers;
+  private byte[] notNullMarkers;
   private MapJoinBytesTableContainer.KeyValueHelper writeHelper;
   private final MapJoinBytesTableContainer.DirectKeyValueWriter directWriteHelper;
   /*
@@ -417,6 +419,14 @@ public LazyBinaryStructObjectInspector getInternalValueOi() {
    return sortableSortOrders;
  }
 
+  public byte[] getNullMarkers() {
+    return nullMarkers;
+  }
+
+  public byte[] getNotNullMarkers() {
+    return notNullMarkers;
+  }
+
  /* For a given row, put it into proper partition based on its hash value.
   * When memory threshold is reached, the biggest hash table in memory will be spilled to disk.
   * If the hash table of a specific partition is already on disk, all later rows will be put into
@@ -708,7 +718,8 @@ public GetAdaptor() {
        nulls[i] = currentKey[i] == null;
      }
      return currentValue.setFromOutput(
-          MapJoinKey.serializeRow(output, currentKey, vectorKeyOIs, sortableSortOrders));
+          MapJoinKey.serializeRow(output, currentKey, vectorKeyOIs,
+              sortableSortOrders, nullMarkers, notNullMarkers));
    }
 
    @Override
@@ -723,7 +734,8 @@ public GetAdaptor() {
        nulls[keyIndex] = currentKey[keyIndex] == null;
      }
      return currentValue.setFromOutput(
-          MapJoinKey.serializeRow(output, currentKey, ois, sortableSortOrders));
+          MapJoinKey.serializeRow(output, currentKey, ois,
+              sortableSortOrders, nullMarkers, notNullMarkers));
    }
 
    @Override
@@ -1064,6 +1076,12 @@ public void setSerde(MapJoinObjectSerDeContext keyCtx, MapJoinObjectSerDeContext
      if (sortableSortOrders == null) {
        sortableSortOrders = ((BinarySortableSerDe) keySerde).getSortOrders();
      }
+      if (nullMarkers == null) {
+        nullMarkers = ((BinarySortableSerDe) keySerde).getNullMarkers();
+      }
+      if (notNullMarkers == null) {
+        notNullMarkers = ((BinarySortableSerDe) keySerde).getNotNullMarkers();
+      }
    }
  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
index 5c2ff92..a8aa71a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java
@@ -25,11 +25,8 @@
 import java.util.Collections;
 import java.util.List;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.debug.Utils;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
 import org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapper;
@@ -53,9 +50,9 @@
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -63,6 +60,8 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hive.common.util.HashCodeUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Table container that serializes keys and values using LazyBinarySerDe into
@@ -83,6 +82,8 @@
  * ordering. Hence, remember the ordering here; it is null if we do use LazyBinarySerDe.
   */
  private boolean[] sortableSortOrders;
+  private byte[] nullMarkers;
+  private byte[] notNullMarkers;
  private KeyValueHelper writeHelper;
  private DirectKeyValueWriter directWriteHelper;
 
@@ -138,6 +139,14 @@ public void setSortableSortOrders(boolean[] sortableSortOrders) {
    this.sortableSortOrders = sortableSortOrders;
  }
 
+  public void setNullMarkers(byte[] nullMarkers) {
+    this.nullMarkers = nullMarkers;
+  }
+
+  public void setNotNullMarkers(byte[] notNullMarkers) {
+    this.notNullMarkers = notNullMarkers;
+  }
+
  public static interface KeyValueHelper extends BytesBytesMultiHashMap.KvSource {
    void setKeyValue(Writable key, Writable val) throws SerDeException;
    /** Get hash value from the key. */
@@ -269,7 +278,14 @@ private void sanityCheckKeyForTag() throws SerDeException {
        fois.add(fields.get(i).getFieldObjectInspector());
      }
      Output output = new Output();
-      BinarySortableSerDe.serializeStruct(output, data, fois, new boolean[fields.size()]);
+      boolean[] sortableSortOrders = new boolean[fields.size()];
+      Arrays.fill(sortableSortOrders, false);
+      byte[] columnNullMarker = new byte[fields.size()];
+      Arrays.fill(columnNullMarker, BinarySortableSerDe.ZERO);
+      byte[] columnNotNullMarker = new byte[fields.size()];
+      Arrays.fill(columnNotNullMarker, BinarySortableSerDe.ONE);
+      BinarySortableSerDe.serializeStruct(output, data, fois, sortableSortOrders,
+          columnNullMarker, columnNotNullMarker);
      hasTag = (output.getLength() != b.getLength());
      if (hasTag) {
        LOG.error("Tag found in keys and will be removed. This should not happen.");
@@ -360,10 +376,14 @@ public void setSerde(MapJoinObjectSerDeContext keyContext, MapJoinObjectSerDeCon
      writeHelper = new LazyBinaryKvWriter(keySerde, valSoi, valueContext.hasFilterTag());
      internalValueOi = valSoi;
      sortableSortOrders = ((BinarySortableSerDe) keySerde).getSortOrders();
+      nullMarkers = ((BinarySortableSerDe) keySerde).getNullMarkers();
+      notNullMarkers = ((BinarySortableSerDe) keySerde).getNotNullMarkers();
    } else {
      writeHelper = new KeyValueWriter(keySerde, valSerde, valueContext.hasFilterTag());
      internalValueOi = createInternalOi(valueContext);
      sortableSortOrders = null;
+      nullMarkers = null;
+      notNullMarkers = null;
    }
  }
 }
@@ -476,7 +496,8 @@ public GetAdaptor() {
        nulls[i] = currentKey[i] == null;
      }
      return currentValue.setFromOutput(
-          MapJoinKey.serializeRow(output, currentKey, vectorKeyOIs, sortableSortOrders));
+          MapJoinKey.serializeRow(output, currentKey, vectorKeyOIs,
+              sortableSortOrders, nullMarkers, notNullMarkers));
    }
 
    @Override
@@ -491,7 +512,8 @@ public GetAdaptor() {
        nulls[keyIndex] = currentKey[keyIndex] == null;
      }
      return currentValue.setFromOutput(
-          MapJoinKey.serializeRow(output, currentKey, ois, sortableSortOrders));
+          MapJoinKey.serializeRow(output, currentKey, ois,
+              sortableSortOrders, nullMarkers, notNullMarkers));
    }
 
    @Override
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
index cfb9abc..9f27f56 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinKey.java
@@ -117,7 +117,8 @@ public static MapJoinKey readFromVector(Output output, MapJoinKey key, Object[]
   */
  public static Output serializeVector(Output byteStream, VectorHashKeyWrapper kw,
      VectorExpressionWriter[] keyOutputWriters, VectorHashKeyWrapperBatch keyWrapperBatch,
-      boolean[] nulls, boolean[] sortableSortOrders) throws HiveException, SerDeException {
+      boolean[] nulls, boolean[] sortableSortOrders, byte[] nullMarkers, byte[] notNullMarkers)
+      throws HiveException, SerDeException {
    Object[] fieldData = new Object[keyOutputWriters.length];
    List<ObjectInspector> fieldOis = new ArrayList<ObjectInspector>();
    for (int i = 0; i < keyOutputWriters.length; ++i) {
@@ -130,7 +131,8 @@ public static Output serializeVector(Output byteStream, VectorHashKeyWrapper kw,
        nulls[i] = (fieldData[i] == null);
      }
    }
-    return serializeRow(byteStream, fieldData, fieldOis, sortableSortOrders);
+    return serializeRow(byteStream, fieldData, fieldOis, sortableSortOrders,
+        nullMarkers, notNullMarkers);
  }
 
  public static MapJoinKey readFromRow(Output output, MapJoinKey key, Object[] keyObject,
@@ -145,7 +147,8 @@ public static MapJoinKey readFromRow(Output output, MapJoinKey key, Object[] key
   * @param byteStream Output to reuse. Can be null, in that case a new one would be created.
   */
  public static Output serializeRow(Output byteStream, Object[] fieldData,
-      List<ObjectInspector> fieldOis, boolean[] sortableSortOrders) throws HiveException {
+      List<ObjectInspector> fieldOis, boolean[] sortableSortOrders,
+      byte[] nullMarkers, byte[] notNullMarkers) throws HiveException {
    if (byteStream == null) {
      byteStream = new Output();
    } else {
@@ -157,7 +160,8 @@ public static Output serializeRow(Output byteStream, Object[] fieldData,
      } else if (sortableSortOrders == null) {
        LazyBinarySerDe.serializeStruct(byteStream, fieldData, fieldOis);
      } else {
-        BinarySortableSerDe.serializeStruct(byteStream, fieldData, fieldOis, sortableSortOrders);
+        BinarySortableSerDe.serializeStruct(byteStream, fieldData, fieldOis, sortableSortOrders,
+            nullMarkers, notNullMarkers);
      }
    } catch (SerDeException e) {
      throw new HiveException("Serialization error", e);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
index 1510fdd..3f16359 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java
@@ -180,7 +180,7 @@ private void initialize() throws SerDeException {
        numExpectedEventsPerSource.get(s).decrement();
        ++sourceInfoCount;
        String columnName = cit.next();
-        String columnType = typit.next(); 
+        String columnType = typit.next();
        ExprNodeDesc partKeyExpr = pit.next();
        SourceInfo si = createSourceInfo(t, partKeyExpr, columnName, columnType, jobConf);
        if (!sourceInfoMap.containsKey(s)) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
index 7bdd11a..fcf18c1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.Counter;
 import org.apache.hadoop.hive.ql.exec.TerminalOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator.Counter;
 import org.apache.hadoop.hive.ql.exec.vector.VectorSerializeRow;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
@@ -39,14 +39,15 @@
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
-import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
+import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
 import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;
 import org.apache.hadoop.hive.serde2.lazybinary.fast.LazyBinarySerializeWrite;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -188,6 +189,60 @@ public VectorReduceSinkCommonOperator(CompilationOpContext ctx,
    return columnSortOrderIsDesc;
  }
 
+  private byte[] getColumnNullMarker(Properties properties, int columnCount, boolean[] columnSortOrder) {
+    String columnNullOrder = properties.getProperty(serdeConstants.SERIALIZATION_NULL_POSITION);
+    byte[] columnNullMarker = new byte[columnCount];
+    for (int i = 0; i < columnNullMarker.length; i++) {
+      if (columnSortOrder[i]) {
+        // Descending
+        if (columnNullOrder != null && columnNullOrder.charAt(i) == 'a') {
+          // Null first
+          columnNullMarker[i] = BinarySortableSerDe.ONE;
+        } else {
+          // Null last (default for descending order)
+          columnNullMarker[i] = BinarySortableSerDe.ZERO;
+        }
+      } else {
+        // Ascending
+        if (columnNullOrder != null && columnNullOrder.charAt(i) == 'z') {
+          // Null last
+          columnNullMarker[i] = BinarySortableSerDe.ONE;
+        } else {
+          // Null first (default for ascending order)
+          columnNullMarker[i] = BinarySortableSerDe.ZERO;
+        }
+      }
+    }
+    return columnNullMarker;
+  }
+
+  private byte[] getColumnNotNullMarker(Properties properties, int columnCount, boolean[] columnSortOrder) {
+    String columnNullOrder = properties.getProperty(serdeConstants.SERIALIZATION_NULL_POSITION);
+    byte[] columnNotNullMarker = new byte[columnCount];
+    for (int i = 0; i < columnNotNullMarker.length; i++) {
+      if (columnSortOrder[i]) {
+        // Descending
+        if (columnNullOrder != null && columnNullOrder.charAt(i) == 'a') {
+          // Null first
+          columnNotNullMarker[i] = BinarySortableSerDe.ZERO;
+        } else {
+          // Null last (default for descending order)
+          columnNotNullMarker[i] = BinarySortableSerDe.ONE;
+        }
+      } else {
+        // Ascending
+        if (columnNullOrder != null && columnNullOrder.charAt(i) == 'z') {
+          // Null last
+          columnNotNullMarker[i] = BinarySortableSerDe.ZERO;
+        } else {
+          // Null first (default for ascending order)
+          columnNotNullMarker[i] = BinarySortableSerDe.ONE;
+        }
+      }
+    }
+    return columnNotNullMarker;
+  }
+
  @Override
  protected void initializeOp(Configuration hconf) throws HiveException {
    super.initializeOp(hconf);
@@ -217,8 +272,13 @@ protected void initializeOp(Configuration hconf) throws HiveException {
    TableDesc keyTableDesc = conf.getKeySerializeInfo();
    boolean[] columnSortOrder =
        getColumnSortOrder(keyTableDesc.getProperties(), reduceSinkKeyColumnMap.length);
+    byte[] columnNullMarker =
+        getColumnNullMarker(keyTableDesc.getProperties(), reduceSinkKeyColumnMap.length, columnSortOrder);
+    byte[] columnNotNullMarker =
+        getColumnNotNullMarker(keyTableDesc.getProperties(), reduceSinkKeyColumnMap.length, columnSortOrder);
 
-    keyBinarySortableSerializeWrite = new 
BinarySortableSerializeWrite(columnSortOrder); + keyBinarySortableSerializeWrite = new BinarySortableSerializeWrite(columnSortOrder, + columnNullMarker, columnNotNullMarker); // Create all nulls key. try { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index f51e1a4..e96f91a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -938,7 +938,7 @@ public void createIndex(String tableName, String indexName, String indexHandlerC FieldSchema col = cols.get(i); if (indexedCols.contains(col.getName())) { indexTblCols.add(col); - sortCols.add(new Order(col.getName(), 1)); + sortCols.add(new Order(col.getName(), 1, 0)); k++; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java index b57dc77..677649d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java @@ -357,7 +357,8 @@ private boolean checkSortColsAndJoinCols(List sortCols, Order o = sortCols.get(pos); if (pos < sortColumnsFirstPartition.size()) { - if (o.getOrder() != sortColumnsFirstPartition.get(pos).getOrder()) { + if (o.getOrder() != sortColumnsFirstPartition.get(pos).getOrder() || + o.getNullOrder() != sortColumnsFirstPartition.get(pos).getNullOrder()) { return false; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java index d5f3057..3d580d8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java @@ -148,23 +148,55 @@ public BucketSortReduceSinkProcessor(ParseContext pGraphContext) { // Get the sort positions and sort order for the table // The sort order contains whether the sorting is happening ascending or descending - private ObjectPair, List> getSortPositionsOrder( + private List getSortPositions( List tabSortCols, List tabCols) { List sortPositions = new ArrayList(); - List sortOrders = new ArrayList(); for (Order sortCol : tabSortCols) { int pos = 0; for (FieldSchema tabCol : tabCols) { if (sortCol.getCol().equals(tabCol.getName())) { sortPositions.add(pos); + break; + } + pos++; + } + } + return sortPositions; + } + + private List getSortOrder( + List tabSortCols, + List tabCols) { + List sortOrders = new ArrayList(); + for (Order sortCol : tabSortCols) { + int pos = 0; + for (FieldSchema tabCol : tabCols) { + if (sortCol.getCol().equals(tabCol.getName())) { sortOrders.add(sortCol.getOrder()); break; } pos++; } } - return new ObjectPair, List>(sortPositions, sortOrders); + return sortOrders; + } + + private List getNullSortOrder( + List tabSortCols, + List tabCols) { + List nullSortOrders = new ArrayList(); + for (Order sortCol : tabSortCols) { + int pos = 0; + for (FieldSchema tabCol : tabCols) { + if (sortCol.getCol().equals(tabCol.getName())) { + nullSortOrders.add(sortCol.getNullOrder()); + break; + } + pos++; + } + } + return nullSortOrders; } // Return true if the partition is bucketed/sorted by the specified positions @@ -174,6 +206,7 @@ private boolean checkPartition(Partition partition, List bucketPositionsDest, List sortPositionsDest, List sortOrderDest, + List sortNullOrderDest, int 
numBucketsDest) { // The bucketing and sorting positions should exactly match int numBuckets = partition.getBucketCount(); @@ -183,11 +216,16 @@ private boolean checkPartition(Partition partition, List partnBucketPositions = getBucketPositions(partition.getBucketCols(), partition.getTable().getCols()); - ObjectPair, List> partnSortPositionsOrder = - getSortPositionsOrder(partition.getSortCols(), partition.getTable().getCols()); + List sortPositions = + getSortPositions(partition.getSortCols(), partition.getTable().getCols()); + List sortOrder = + getSortOrder(partition.getSortCols(), partition.getTable().getCols()); + List sortNullOrder = + getNullSortOrder(partition.getSortCols(), partition.getTable().getCols()); return bucketPositionsDest.equals(partnBucketPositions) && - sortPositionsDest.equals(partnSortPositionsOrder.getFirst()) && - sortOrderDest.equals(partnSortPositionsOrder.getSecond()); + sortPositionsDest.equals(sortPositions) && + sortOrderDest.equals(sortOrder) && + sortNullOrderDest.equals(sortNullOrder); } // Return true if the table is bucketed/sorted by the specified positions @@ -197,6 +235,7 @@ private boolean checkTable(Table table, List bucketPositionsDest, List sortPositionsDest, List sortOrderDest, + List sortNullOrderDest, int numBucketsDest) { // The bucketing and sorting positions should exactly match int numBuckets = table.getNumBuckets(); @@ -206,11 +245,16 @@ private boolean checkTable(Table table, List tableBucketPositions = getBucketPositions(table.getBucketCols(), table.getCols()); - ObjectPair, List> tableSortPositionsOrder = - getSortPositionsOrder(table.getSortCols(), table.getCols()); + List sortPositions = + getSortPositions(table.getSortCols(), table.getCols()); + List sortOrder = + getSortOrder(table.getSortCols(), table.getCols()); + List sortNullOrder = + getNullSortOrder(table.getSortCols(), table.getCols()); return bucketPositionsDest.equals(tableBucketPositions) && - sortPositionsDest.equals(tableSortPositionsOrder.getFirst()) && - sortOrderDest.equals(tableSortPositionsOrder.getSecond()); + sortPositionsDest.equals(sortPositions) && + sortOrderDest.equals(sortOrder) && + sortNullOrderDest.equals(sortNullOrder); } // Store the bucket path to bucket number mapping in the table scan operator. @@ -288,7 +332,8 @@ private int findColumnPosition(List cols, String colName) { private boolean validateSMBJoinKeys(SMBJoinDesc smbJoinDesc, List sourceTableBucketCols, List sourceTableSortCols, - List sortOrder) { + List sortOrder, + List sortNullOrder) { // The sort-merge join creates the output sorted and bucketized by the same columns. // This can be relaxed in the future if there is a requirement. if (!sourceTableBucketCols.equals(sourceTableSortCols)) { @@ -426,10 +471,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // also match for this to be converted to a map-only job. 
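[Editor's aside, not part of the patch] The practical effect of adding sortNullOrderDest to checkPartition and checkTable is that a source and a destination sorted on the same columns in the same direction, but with opposite NULL placement, no longer qualify for the map-only copy; the reduce phase is kept so the destination's declared NULL order is actually enforced. A small runnable illustration with hypothetical values (0 = NULLS FIRST, 1 = NULLS LAST, following the thrift Order.nullOrder encoding):

    import java.util.Arrays;
    import java.util.List;

    class NullOrderMatchSketch {
      public static void main(String[] args) {
        List<Integer> dest = Arrays.asList(0); // destination: id ASC NULLS FIRST
        List<Integer> src  = Arrays.asList(1); // source:      id ASC NULLS LAST
        // Before this patch only the positions and asc/desc orders were compared;
        // the new equality check on null orders makes this pair incompatible.
        System.out.println(dest.equals(src)); // false -> no map-only conversion
      }
    }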
List bucketPositions = getBucketPositions(destTable.getBucketCols(), destTable.getCols()); - ObjectPair, List> sortOrderPositions = - getSortPositionsOrder(destTable.getSortCols(), destTable.getCols()); - List sortPositions = sortOrderPositions.getFirst(); - List sortOrder = sortOrderPositions.getSecond(); + List sortPositions = + getSortPositions(destTable.getSortCols(), destTable.getCols()); + List sortOrder = + getSortOrder(destTable.getSortCols(), destTable.getCols()); + List sortNullOrder = + getNullSortOrder(destTable.getSortCols(), destTable.getCols()); boolean useBucketSortPositions = true; // Only selects and filters are allowed @@ -464,7 +511,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } if (!validateSMBJoinKeys(smbJoinDesc, sourceTableBucketCols, - sourceTableSortCols, sortOrder)) { + sourceTableSortCols, sortOrder, sortNullOrder)) { return null; } @@ -539,7 +586,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } for (Partition partition : partitions) { if (!checkPartition(partition, newBucketPositions, newSortPositions, sortOrder, - numBucketsDestination)) { + sortNullOrder, numBucketsDestination)) { return null; } } @@ -550,7 +597,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } else { if (!checkTable(srcTable, newBucketPositions, newSortPositions, sortOrder, - numBucketsDestination)) { + sortNullOrder, numBucketsDestination)) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java index c38c6d7..2d5dbfd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java @@ -18,18 +18,16 @@ package org.apache.hadoop.hive.ql.optimizer; +import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.FIXED; + import java.util.ArrayList; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Stack; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -60,11 +58,11 @@ import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Sets; -import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.FIXED; - public class ReduceSinkMapJoinProc implements NodeProcessor { private final static Logger LOG = LoggerFactory.getLogger(ReduceSinkMapJoinProc.class.getName()); @@ -347,11 +345,14 @@ public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, Ma Map> keyExprMap = mapJoinOp.getConf().getKeys(); List keyCols = keyExprMap.get(Byte.valueOf((byte) 0)); StringBuilder keyOrder = new StringBuilder(); + StringBuilder nullPosition = new StringBuilder(); for (ExprNodeDesc k: keyCols) { keyOrder.append("+"); + nullPosition.append("a"); } TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils - .getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString()); + .getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), 
keyOrder.toString(), + nullPosition.toString()); mapJoinOp.getConf().setKeyTableDesc(keyTableDesc); // let the dummy op be the parent of mapjoin op diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java index 27b0457..150cbad 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; @@ -180,10 +181,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Get the positions for partition, bucket and sort columns List bucketPositions = getBucketPositions(destTable.getBucketCols(), destTable.getCols()); - ObjectPair, List> sortOrderPositions = getSortPositionsOrder( - destTable.getSortCols(), destTable.getCols()); List sortPositions = null; List sortOrder = null; + List sortNullOrder = null; if (fsOp.getConf().getWriteType() == AcidUtils.Operation.UPDATE || fsOp.getConf().getWriteType() == AcidUtils.Operation.DELETE) { // When doing updates and deletes we always want to sort on the rowid because the ACID @@ -191,13 +191,16 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // ignore whatever comes from the table and enforce this sort order instead. sortPositions = Arrays.asList(0); sortOrder = Arrays.asList(1); // 1 means asc, could really use enum here in the thrift if + sortNullOrder = Arrays.asList(0); } else { - sortPositions = sortOrderPositions.getFirst(); - sortOrder = sortOrderPositions.getSecond(); + sortPositions = getSortPositions(destTable.getSortCols(), destTable.getCols()); + sortOrder = getSortOrders(destTable.getSortCols(), destTable.getCols()); + sortNullOrder = getSortNullOrders(destTable.getSortCols(), destTable.getCols()); } LOG.debug("Got sort order"); for (int i : sortPositions) LOG.debug("sort position " + i); for (int i : sortOrder) LOG.debug("sort order " + i); + for (int i : sortNullOrder) LOG.debug("sort null order " + i); List partitionPositions = getPartitionPositions(dpCtx, fsParent.getSchema()); List colInfos = fsParent.getSchema().getSignature(); ArrayList bucketColumns = getPositionsToExprNodes(bucketPositions, colInfos); @@ -214,7 +217,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, for (ColumnInfo ci : valColInfo) { newValueCols.add(new ExprNodeColumnDesc(ci)); } - ReduceSinkDesc rsConf = getReduceSinkDesc(partitionPositions, sortPositions, sortOrder, + ReduceSinkDesc rsConf = getReduceSinkDesc(partitionPositions, sortPositions, sortOrder, sortNullOrder, newValueCols, bucketColumns, numBuckets, fsParent, fsOp.getConf().getWriteType()); if (!bucketColumns.isEmpty()) { @@ -384,17 +387,19 @@ private boolean removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) { } public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, - List sortPositions, List sortOrder, ArrayList newValueCols, - ArrayList bucketColumns, int numBuckets, + List sortPositions, List sortOrder, List sortNullOrder, + ArrayList newValueCols, ArrayList bucketColumns, int numBuckets, Operator parent, 
AcidUtils.Operation writeType) { // Order of KEY columns // 1) Partition columns // 2) Bucket number column // 3) Sort columns + // 4) Null sort columns List keyColsPosInVal = Lists.newArrayList(); ArrayList newKeyCols = Lists.newArrayList(); List newSortOrder = Lists.newArrayList(); + List newSortNullOrder = Lists.newArrayList(); int numPartAndBuck = partitionPositions.size(); keyColsPosInVal.addAll(partitionPositions); @@ -425,6 +430,30 @@ public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, } } + // if partition and bucket columns are sorted in ascending order, by default + // nulls come first; otherwise nulls come last + Integer nullOrder = order == 1 ? 0 : 1; + if (sortNullOrder != null && !sortNullOrder.isEmpty()) { + if (sortNullOrder.get(0).intValue() == 0) { + nullOrder = 0; + } else { + nullOrder = 1; + } + } + for (int i = 0; i < numPartAndBuck; i++) { + newSortNullOrder.add(nullOrder); + } + newSortNullOrder.addAll(sortNullOrder); + + String nullOrderStr = ""; + for (Integer i : newSortNullOrder) { + if(i.intValue() == 0) { + nullOrderStr += "a"; + } else { + nullOrderStr += "z"; + } + } + ArrayList newPartCols = Lists.newArrayList(); // we will clone here as RS will update bucket column key with its @@ -451,9 +480,11 @@ public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, ReduceSinkOperator.class); if (parentRSOp != null && parseCtx.getQueryProperties().hasOuterOrderBy()) { String parentRSOpOrder = parentRSOp.getConf().getOrder(); + String parentRSOpNullOrder = parentRSOp.getConf().getNullPosition(); if (parentRSOpOrder != null && !parentRSOpOrder.isEmpty() && sortPositions.isEmpty()) { newKeyCols.addAll(parentRSOp.getConf().getKeyCols()); orderStr += parentRSOpOrder; + nullOrderStr += parentRSOpNullOrder; } } @@ -462,7 +493,7 @@ public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, // from Key and Value TableDesc List fields = PlanUtils.getFieldSchemasFromColumnList(newKeyCols, "reducesinkkey"); - TableDesc keyTable = PlanUtils.getReduceKeyTableDesc(fields, orderStr); + TableDesc keyTable = PlanUtils.getReduceKeyTableDesc(fields, orderStr, nullOrderStr); ArrayList outputKeyCols = Lists.newArrayList(); for (int i = 0; i < newKeyCols.size(); i++) { outputKeyCols.add("reducesinkkey" + i); @@ -490,27 +521,65 @@ public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, } /** - * Get the sort positions and sort order for the sort columns + * Get the sort positions for the sort columns * @param tabSortCols * @param tabCols * @return */ - private ObjectPair, List> getSortPositionsOrder(List tabSortCols, + private List getSortPositions(List tabSortCols, List tabCols) { List sortPositions = Lists.newArrayList(); - List sortOrders = Lists.newArrayList(); for (Order sortCol : tabSortCols) { int pos = 0; for (FieldSchema tabCol : tabCols) { if (sortCol.getCol().equals(tabCol.getName())) { sortPositions.add(pos); - sortOrders.add(sortCol.getOrder()); break; } pos++; } } - return new ObjectPair, List>(sortPositions, sortOrders); + return sortPositions; + } + + /** + * Get the sort order for the sort columns + * @param tabSortCols + * @param tabCols + * @return + */ + private List getSortOrders(List tabSortCols, + List tabCols) { + List sortOrders = Lists.newArrayList(); + for (Order sortCol : tabSortCols) { + for (FieldSchema tabCol : tabCols) { + if (sortCol.getCol().equals(tabCol.getName())) { + sortOrders.add(sortCol.getOrder()); + break; + } + } + } + return sortOrders; + } + + /** + * Get the null sort order for the sort columns + * @param 
tabSortCols + * @param tabCols + * @return + */ + private List getSortNullOrders(List tabSortCols, + List tabCols) { + List sortNullOrders = Lists.newArrayList(); + for (Order sortCol : tabSortCols) { + for (FieldSchema tabCol : tabCols) { + if (sortCol.getCol().equals(tabCol.getName())) { + sortNullOrders.add(sortCol.getNullOrder()); + break; + } + } + } + return sortNullOrders; } private ArrayList getPositionsToExprNodes(List pos, diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 02db680..a95da0a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -33,6 +33,7 @@ import org.apache.calcite.rel.RelDistribution; import org.apache.calcite.rel.RelFieldCollation; import org.apache.calcite.rel.RelFieldCollation.Direction; +import org.apache.calcite.rel.RelFieldCollation.NullDirection; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.logical.LogicalTableScan; import org.apache.calcite.rel.type.RelDataType; @@ -165,7 +166,14 @@ public RelNode toRel(ToRelContext context) { else { direction = Direction.DESCENDING; } - collationList.add(new RelFieldCollation(i,direction)); + NullDirection nullDirection; + if (sortColumn.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST) { + nullDirection = NullDirection.FIRST; + } + else { + nullDirection = NullDirection.LAST; + } + collationList.add(new RelFieldCollation(i,direction,nullDirection)); break; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java index 18145ae..f1ec7cf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java @@ -271,8 +271,8 @@ private void sortLimit(RelOptCluster cluster, RelBuilder relBuilder, int offset, final List originalExtraNodes = ImmutableList.copyOf(extraNodes); for (RexNode node : nodes) { fieldCollations.add( - collation(node, RelFieldCollation.Direction.ASCENDING, null, - extraNodes)); + collation(node, RelFieldCollation.Direction.ASCENDING, + RelFieldCollation.NullDirection.FIRST, extraNodes)); } final RexNode offsetNode = offset <= 0 ? null : relBuilder.literal(offset); final RexNode fetchNode = fetch < 0 ? 
null : relBuilder.literal(fetch); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java index 3f2267d..de7e2f8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java @@ -52,8 +52,6 @@ import org.apache.calcite.sql.SqlOperator; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.ImmutableBitSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException; @@ -64,6 +62,8 @@ import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Iterables; @@ -226,6 +226,24 @@ private void convertOrderLimitToASTNode(HiveSortLimit order) { ASTNode directionAST = c.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + ASTNode nullDirectionAST; + // Null direction + if (c.nullDirection == RelFieldCollation.NullDirection.FIRST) { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_FIRST, "TOK_NULLS_FIRST"); + directionAST.addChild(nullDirectionAST); + } else if (c.nullDirection == RelFieldCollation.NullDirection.LAST) { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_LAST, "TOK_NULLS_LAST"); + directionAST.addChild(nullDirectionAST); + } else { + // Default + if (c.getDirection() == RelFieldCollation.Direction.ASCENDING) { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_FIRST, "TOK_NULLS_FIRST"); + directionAST.addChild(nullDirectionAST); + } else { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_LAST, "TOK_NULLS_LAST"); + directionAST.addChild(nullDirectionAST); + } + } // 3 Convert OB expr (OB Expr is usually an input ref except for top // level OB; top level OB will have RexCall kept in a map.) @@ -245,7 +263,7 @@ private void convertOrderLimitToASTNode(HiveSortLimit order) { } // 4 buildup the ob expr AST - directionAST.addChild(astCol); + nullDirectionAST.addChild(astCol); orderAst.addChild(directionAST); } hiveAST.order = orderAst; @@ -430,12 +448,31 @@ private ASTNode getPSpecAST(RexWindow window) { if (window.orderKeys != null && !window.orderKeys.isEmpty()) { oByAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY"); for (RexFieldCollation ok : window.orderKeys) { - ASTNode astNode = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder + ASTNode directionAST = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? 
ASTBuilder .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + ASTNode nullDirectionAST; + // Null direction + if (ok.right.contains(SqlKind.NULLS_FIRST)) { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_FIRST, "TOK_NULLS_FIRST"); + directionAST.addChild(nullDirectionAST); + } else if (ok.right.contains(SqlKind.NULLS_LAST)) { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_LAST, "TOK_NULLS_LAST"); + directionAST.addChild(nullDirectionAST); + } else { + // Default + if (ok.getDirection() == RelFieldCollation.Direction.ASCENDING) { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_FIRST, "TOK_NULLS_FIRST"); + directionAST.addChild(nullDirectionAST); + } else { + nullDirectionAST = ASTBuilder.createAST(HiveParser.TOK_NULLS_LAST, "TOK_NULLS_LAST"); + directionAST.addChild(nullDirectionAST); + } + } ASTNode astCol = ok.left.accept(this); - astNode.addChild(astCol); - oByAst.addChild(astNode); + + nullDirectionAST.addChild(astCol); + oByAst.addChild(directionAST); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java index b42e78f..d012912 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java @@ -41,8 +41,6 @@ import org.apache.calcite.rex.RexWindowBound; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.type.SqlTypeUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveIntervalDayTime; import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth; @@ -50,6 +48,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema; import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec; @@ -70,6 +69,8 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableSet; @@ -278,7 +279,18 @@ private PartitioningSpec getPSpec(RexWindow window) { OrderExpression exprSpec = new OrderExpression(); Order order = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? Order.ASC : Order.DESC; + NullOrder nullOrder; + if ( ok.right.contains(SqlKind.NULLS_FIRST) ) { + nullOrder = NullOrder.NULLS_FIRST; + } else if ( ok.right.contains(SqlKind.NULLS_LAST) ) { + nullOrder = NullOrder.NULLS_LAST; + } else { + // Default + nullOrder = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? 
+ NullOrder.NULLS_FIRST : NullOrder.NULLS_LAST; + } exprSpec.setOrder(order); + exprSpec.setNullOrder(nullOrder); ASTNode astNode = ok.left.accept(new RexVisitor(schema)); exprSpec.setExpression(astNode); oSpec.addExpression(exprSpec); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java index b841315..1307808 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java @@ -41,8 +41,6 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.ImmutableBitSet; import org.apache.calcite.util.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.StrictChecks; import org.apache.hadoop.hive.ql.exec.ColumnInfo; @@ -65,8 +63,8 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin; -import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortExchange; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion; import org.apache.hadoop.hive.ql.parse.JoinCond; @@ -99,6 +97,8 @@ import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.UnionDesc; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; @@ -435,6 +435,7 @@ OpAttr visit(HiveSortLimit sortRel) throws SemanticException { Map obRefToCallMap = sortRel.getInputRefToCallMap(); List sortCols = new ArrayList(); StringBuilder order = new StringBuilder(); + StringBuilder nullOrder = new StringBuilder(); for (RelFieldCollation sortInfo : sortRel.getCollation().getFieldCollations()) { int sortColumnPos = sortInfo.getFieldIndex(); ColumnInfo columnInfo = new ColumnInfo(inputOp.getSchema().getSignature() @@ -447,6 +448,14 @@ OpAttr visit(HiveSortLimit sortRel) throws SemanticException { } else { order.append("+"); } + if (sortInfo.nullDirection == RelFieldCollation.NullDirection.FIRST) { + nullOrder.append("a"); + } else if (sortInfo.nullDirection == RelFieldCollation.NullDirection.LAST) { + nullOrder.append("z"); + } else { + // Default + nullOrder.append(sortInfo.getDirection() == RelFieldCollation.Direction.DESCENDING ? "z" : "a"); + } if (obRefToCallMap != null) { RexNode obExpr = obRefToCallMap.get(sortColumnPos); @@ -474,7 +483,7 @@ OpAttr visit(HiveSortLimit sortRel) throws SemanticException { // 1.b. Generate reduce sink and project operator resultOp = genReduceSinkAndBacktrackSelect(resultOp, sortCols.toArray(new ExprNodeDesc[sortCols.size()]), 0, new ArrayList(), - order.toString(), numReducers, Operation.NOT_ACID, hiveConf, keepColumns); + order.toString(), nullOrder.toString(), numReducers, Operation.NOT_ACID, hiveConf, keepColumns); } // 2. 
If we need to generate limit @@ -626,6 +635,7 @@ private OpAttr genPTF(OpAttr inputOpAf, WindowingSpec wSpec) throws SemanticExce ArrayList keyCols = new ArrayList(); ArrayList partCols = new ArrayList(); StringBuilder order = new StringBuilder(); + StringBuilder nullOrder = new StringBuilder(); for (PartitionExpression partCol : wSpec.getQueryPartitionSpec().getExpressions()) { ExprNodeDesc partExpr = semanticAnalyzer.genExprNodeDesc(partCol.getExpression(), rr); @@ -633,6 +643,7 @@ private OpAttr genPTF(OpAttr inputOpAf, WindowingSpec wSpec) throws SemanticExce keyCols.add(partExpr); partCols.add(partExpr); order.append('+'); + nullOrder.append('a'); } } @@ -640,19 +651,22 @@ private OpAttr genPTF(OpAttr inputOpAf, WindowingSpec wSpec) throws SemanticExce for (OrderExpression orderCol : wSpec.getQueryOrderSpec().getExpressions()) { ExprNodeDesc orderExpr = semanticAnalyzer.genExprNodeDesc(orderCol.getExpression(), rr); char orderChar = orderCol.getOrder() == PTFInvocationSpec.Order.ASC ? '+' : '-'; + char nullOrderChar = orderCol.getNullOrder() == PTFInvocationSpec.NullOrder.NULLS_FIRST ? 'a' : 'z'; int index = ExprNodeDescUtils.indexOf(orderExpr, keyCols); if (index >= 0) { order.setCharAt(index, orderChar); + nullOrder.setCharAt(index, nullOrderChar); continue; } keyCols.add(orderExpr); order.append(orderChar); + nullOrder.append(nullOrderChar); } } SelectOperator selectOp = genReduceSinkAndBacktrackSelect(input, keyCols.toArray(new ExprNodeDesc[keyCols.size()]), 0, partCols, - order.toString(), -1, Operation.NOT_ACID, hiveConf); + order.toString(), nullOrder.toString(), -1, Operation.NOT_ACID, hiveConf); // 2. Finally create PTF PTFTranslator translator = new PTFTranslator(); @@ -677,14 +691,14 @@ private OpAttr genPTF(OpAttr inputOpAf, WindowingSpec wSpec) throws SemanticExce private static SelectOperator genReduceSinkAndBacktrackSelect(Operator input, ExprNodeDesc[] keys, int tag, ArrayList partitionCols, String order, - int numReducers, Operation acidOperation, HiveConf hiveConf) + String nullOrder, int numReducers, Operation acidOperation, HiveConf hiveConf) throws SemanticException { - return genReduceSinkAndBacktrackSelect(input, keys, tag, partitionCols, order, + return genReduceSinkAndBacktrackSelect(input, keys, tag, partitionCols, order, nullOrder, numReducers, acidOperation, hiveConf, input.getSchema().getColumnNames()); } private static SelectOperator genReduceSinkAndBacktrackSelect(Operator input, - ExprNodeDesc[] keys, int tag, ArrayList partitionCols, String order, + ExprNodeDesc[] keys, int tag, ArrayList partitionCols, String order, String nullOrder, int numReducers, Operation acidOperation, HiveConf hiveConf, List keepColNames) throws SemanticException { // 1. Generate RS operator @@ -715,7 +729,8 @@ private static SelectOperator genReduceSinkAndBacktrackSelect(Operator input, "In CBO return path, genReduceSinkAndBacktrackSelect is expecting only one tableAlias but there is none"); } // 1.2 Now generate RS operator - ReduceSinkOperator rsOp = genReduceSink(input, tableAlias, keys, tag, partitionCols, order, numReducers, acidOperation, hiveConf); + ReduceSinkOperator rsOp = genReduceSink(input, tableAlias, keys, tag, partitionCols, order, + nullOrder, numReducers, acidOperation, hiveConf); // 2. 
Generate backtrack Select operator Map descriptors = buildBacktrackFromReduceSink(keepColNames, @@ -737,13 +752,13 @@ private static SelectOperator genReduceSinkAndBacktrackSelect(Operator input, private static ReduceSinkOperator genReduceSink(Operator input, String tableAlias, ExprNodeDesc[] keys, int tag, int numReducers, Operation acidOperation, HiveConf hiveConf) throws SemanticException { - return genReduceSink(input, tableAlias, keys, tag, new ArrayList(), "", numReducers, + return genReduceSink(input, tableAlias, keys, tag, new ArrayList(), "", "", numReducers, acidOperation, hiveConf); } @SuppressWarnings({ "rawtypes", "unchecked" }) private static ReduceSinkOperator genReduceSink(Operator input, String tableAlias, ExprNodeDesc[] keys, int tag, - ArrayList partitionCols, String order, int numReducers, + ArrayList partitionCols, String order, String nullOrder, int numReducers, Operation acidOperation, HiveConf hiveConf) throws SemanticException { Operator dummy = Operator.createDummy(); // dummy for backtracking dummy.setParentOperators(Arrays.asList(input)); @@ -818,7 +833,7 @@ private static ReduceSinkOperator genReduceSink(Operator input, String tableA reduceKeys.size(), numReducers, acidOperation); } else { rsDesc = PlanUtils.getReduceSinkDesc(reduceKeys, reduceValues, outputColumnNames, false, tag, - partitionCols, order, numReducers, acidOperation); + partitionCols, order, nullOrder, numReducers, acidOperation); } ReduceSinkOperator rsOp = (ReduceSinkOperator) OperatorFactory.getAndMakeChild( diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java index 638b91e..e210680 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java @@ -201,7 +201,8 @@ protected boolean merge(ReduceSinkOperator cRS, JoinOperator pJoin, int minReduc return false; } - Integer moveRSOrderTo = checkOrder(cRSc.getOrder(), pRSNc.getOrder()); + Integer moveRSOrderTo = checkOrder(cRSc.getOrder(), pRSNc.getOrder(), + cRSc.getNullPosition(), pRSNc.getNullPosition()); if (moveRSOrderTo == null) { return false; } @@ -298,6 +299,7 @@ protected boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minR "Try set " + HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION + "=false;"); } pRS.getConf().setOrder(cRS.getConf().getOrder()); + pRS.getConf().setNullPosition(cRS.getConf().getNullPosition()); } if (result[3] > 0) { @@ -313,7 +315,8 @@ protected boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minR pRS.getConf().setNumDistributionKeys(cRS.getConf().getNumDistributionKeys()); List fields = PlanUtils.getFieldSchemasFromColumnList(pRS.getConf() .getKeyCols(), "reducesinkkey"); - TableDesc keyTable = PlanUtils.getReduceKeyTableDesc(fields, pRS.getConf().getOrder()); + TableDesc keyTable = PlanUtils.getReduceKeyTableDesc(fields, pRS.getConf().getOrder(), + pRS.getConf().getNullPosition()); ArrayList outputKeyCols = Lists.newArrayList(); for (int i = 0; i < fields.size(); i++) { outputKeyCols.add(fields.get(i).getName()); @@ -337,7 +340,8 @@ protected boolean merge(ReduceSinkOperator cRS, ReduceSinkOperator pRS, int minR throws SemanticException { ReduceSinkDesc cConf = cRS.getConf(); ReduceSinkDesc pConf = pRS.getConf(); - Integer moveRSOrderTo = checkOrder(cConf.getOrder(), pConf.getOrder()); + Integer moveRSOrderTo = 
checkOrder(cConf.getOrder(), pConf.getOrder(), + cConf.getNullPosition(), pConf.getNullPosition()); if (moveRSOrderTo == null) { return null; } @@ -447,7 +451,10 @@ protected Integer sameKeys(List<ExprNodeDesc> cexprs, List<ExprNodeDesc> pexprs, } // order of overlapping keys should be exactly the same - protected Integer checkOrder(String corder, String porder) { + protected Integer checkOrder(String corder, String porder, + String cNullOrder, String pNullOrder) { + assert corder == null || cNullOrder == null || corder.length() == cNullOrder.length(); + assert porder == null || pNullOrder == null || porder.length() == pNullOrder.length(); if (corder == null || corder.trim().equals("")) { if (porder == null || porder.trim().equals("")) { return 0; @@ -459,8 +466,11 @@ } corder = corder.trim(); porder = porder.trim(); + cNullOrder = cNullOrder.trim(); + pNullOrder = pNullOrder.trim(); int target = Math.min(corder.length(), porder.length()); - if (!corder.substring(0, target).equals(porder.substring(0, target))) { + if (!corder.substring(0, target).equals(porder.substring(0, target)) || + !cNullOrder.substring(0, target).equals(pNullOrder.substring(0, target))) { return null; } return Integer.valueOf(corder.length()).compareTo(porder.length()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java index 296fecb..ea3e179 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java @@ -201,14 +201,16 @@ public SortCol() { private List<Integer> indexes = new ArrayList<Integer>(); // Sort order (+|-) private char sortOrder; + private char nullSortOrder; - public SortCol(String name, int index, char sortOrder) { - this(sortOrder); + public SortCol(String name, int index, char sortOrder, char nullSortOrder) { + this(sortOrder, nullSortOrder); addAlias(name, index); } - public SortCol(char sortOrder) { + public SortCol(char sortOrder, char nullSortOrder) { this.sortOrder = sortOrder; + this.nullSortOrder = nullSortOrder; } @@ -232,11 +234,16 @@ public char getSortOrder() { return sortOrder; } + public char getNullSortOrder() { + return nullSortOrder; + } + @Override // Chooses a representative alias, index, and order to use as the String, the first is used // because it is set in the constructor public String toString() { - return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder; + return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder + + " nullOrder: " + nullSortOrder; } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java index aa41200..629fe67 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java @@ -166,6 +166,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, } String sortOrder = rsDesc.getOrder(); + String nullSortOrder = rsDesc.getNullPosition(); List<ExprNodeDesc> keyCols = rsDesc.getKeyCols(); List<ExprNodeDesc> valCols = ExprNodeDescUtils.backtrack(joinValues, jop, parent); @@ -186,7 +187,8 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, newSortCols[keyIndex].addAlias(vname, vindex); } else { newBucketCols[keyIndex] = new BucketCol(vname, vindex); -
newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex)); + newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex), + nullSortOrder.charAt(keyIndex)); } } } @@ -311,7 +313,8 @@ private static void findBucketingSortingColumns(List exprs, int sortIndex = indexOfColName(sortCols, columnExpr.getColumn()); if (sortIndex != -1) { if (newSortCols[sortIndex] == null) { - newSortCols[sortIndex] = new SortCol(sortCols.get(sortIndex).getSortOrder()); + newSortCols[sortIndex] = new SortCol(sortCols.get(sortIndex).getSortOrder(), + sortCols.get(sortIndex).getNullSortOrder()); } newSortCols[sortIndex].addAlias( colInfos.get(colInfosIndex).getInternalName(), colInfosIndex); @@ -436,7 +439,7 @@ private static int indexOfColName(List bucketSortCols, private static List getNewSortCols(List sortCols, List colInfos) { List newSortCols = new ArrayList(sortCols.size()); for (int i = 0; i < sortCols.size(); i++) { - SortCol sortCol = new SortCol(sortCols.get(i).getSortOrder()); + SortCol sortCol = new SortCol(sortCols.get(i).getSortOrder(), sortCols.get(i).getNullSortOrder()); for (Integer index : sortCols.get(i).getIndexes()) { // The only time this condition should be false is in the case of dynamic partitioning if (index < colInfos.size()) { @@ -537,6 +540,7 @@ static void extractTraits(BucketingSortingCtx bctx, ReduceSinkOperator rop, Oper static List extractSortCols(ReduceSinkOperator rop, List outputValues) { String sortOrder = rop.getConf().getOrder(); + String nullSortOrder = rop.getConf().getNullPosition(); List sortCols = new ArrayList(); ArrayList keyCols = rop.getConf().getKeyCols(); for (int i = 0; i < keyCols.size(); i++) { @@ -548,7 +552,8 @@ static void extractTraits(BucketingSortingCtx bctx, ReduceSinkOperator rop, Oper if (index < 0) { break; } - sortCols.add(new SortCol(((ExprNodeColumnDesc) keyCol).getColumn(), index, sortOrder.charAt(i))); + sortCols.add(new SortCol(((ExprNodeColumnDesc) keyCol).getColumn(), index, + sortOrder.charAt(i), nullSortOrder.charAt(i))); } // If the sorted columns can't all be found in the values then the data is only sorted on // the columns seen up until now @@ -649,6 +654,7 @@ protected void processGroupByReduceSink(ReduceSinkOperator rop, GroupByOperator GroupByDesc groupByDesc = gop.getConf(); String sortOrder = rop.getConf().getOrder(); + String nullSortOrder = rop.getConf().getNullPosition(); List bucketCols = new ArrayList(); List sortCols = new ArrayList(); assert rop.getConf().getKeyCols().size() <= rop.getSchema().getSignature().size(); @@ -659,7 +665,7 @@ protected void processGroupByReduceSink(ReduceSinkOperator rop, GroupByOperator } String colName = rop.getSchema().getSignature().get(i).getInternalName(); bucketCols.add(new BucketCol(colName, i)); - sortCols.add(new SortCol(colName, i, sortOrder.charAt(i))); + sortCols.add(new SortCol(colName, i, sortOrder.charAt(i), nullSortOrder.charAt(i))); } bctx.setBucketedCols(rop, bucketCols); bctx.setSortedCols(rop, sortCols); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkReduceSinkMapJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkReduceSinkMapJoinProc.java index f48fac1..237c70a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkReduceSinkMapJoinProc.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/SparkReduceSinkMapJoinProc.java @@ -18,10 +18,13 @@ package org.apache.hadoop.hive.ql.optimizer.spark; -import com.google.common.base.Preconditions; +import java.util.ArrayList; +import 
java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Stack; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; @@ -52,13 +55,10 @@ import org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc; import org.apache.hadoop.hive.ql.plan.SparkWork; import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Stack; +import com.google.common.base.Preconditions; public class SparkReduceSinkMapJoinProc implements NodeProcessor { @@ -209,11 +209,14 @@ public Object process(Node nd, Stack stack, Map> keyExprMap = mapJoinOp.getConf().getKeys(); List keyCols = keyExprMap.get(Byte.valueOf((byte) 0)); StringBuilder keyOrder = new StringBuilder(); + StringBuilder nullPosition = new StringBuilder(); for (int i = 0; i < keyCols.size(); i++) { keyOrder.append("+"); + nullPosition.append("a"); } TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils - .getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString()); + .getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), + nullPosition.toString()); mapJoinOp.getConf().setKeyTableDesc(keyTableDesc); // let the dummy op be the parent of mapjoin op diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 28c8fdb..1f0331c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -108,6 +108,8 @@ public static int HIVE_COLUMN_ORDER_ASC = 1; public static int HIVE_COLUMN_ORDER_DESC = 0; + public static int HIVE_COLUMN_NULLS_FIRST = 0; + public static int HIVE_COLUMN_NULLS_LAST = 1; /** * ReadEntities that are passed to the hooks. 
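[Editor's aside, not part of the patch] Pulling the pieces together: the grammar hunks that follow wrap every sort column in a TOK_NULLS_FIRST or TOK_NULLS_LAST node nested under TOK_TABSORTCOLNAMEASC/TOK_TABSORTCOLNAMEDESC, and the BaseSemanticAnalyzer changes around the constants above fold both tokens into the three-field thrift Order. The resulting DDL-to-metadata mapping, using the defaults the grammar encodes (ASC implies NULLS FIRST, DESC implies NULLS LAST):

    // Constant values: HIVE_COLUMN_ORDER_ASC = 1, HIVE_COLUMN_ORDER_DESC = 0,
    //                  HIVE_COLUMN_NULLS_FIRST = 0, HIVE_COLUMN_NULLS_LAST = 1
    // SORTED BY (c ASC)               -> new Order("c", 1, 0)  // ASC,  NULLS FIRST
    // SORTED BY (c ASC NULLS LAST)    -> new Order("c", 1, 1)  // ASC,  NULLS LAST
    // SORTED BY (c DESC)              -> new Order("c", 0, 1)  // DESC, NULLS LAST
    // SORTED BY (c DESC NULLS FIRST)  -> new Order("c", 0, 0)  // DESC, NULLS FIRST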
@@ -656,11 +658,23 @@ private static String spliceString(String str, int i, int length, String replace for (int i = 0; i < numCh; i++) { ASTNode child = (ASTNode) ast.getChild(i); if (child.getToken().getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { - colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), - HIVE_COLUMN_ORDER_ASC)); + child = (ASTNode) child.getChild(0); + if (child.getToken().getType() == HiveParser.TOK_NULLS_FIRST) { + colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), + HIVE_COLUMN_ORDER_ASC, HIVE_COLUMN_NULLS_FIRST)); + } else { + colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), + HIVE_COLUMN_ORDER_ASC, HIVE_COLUMN_NULLS_LAST)); + } } else { - colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), - HIVE_COLUMN_ORDER_DESC)); + child = (ASTNode) child.getChild(0); + if (child.getToken().getType() == HiveParser.TOK_NULLS_LAST) { + colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), + HIVE_COLUMN_ORDER_DESC, HIVE_COLUMN_NULLS_LAST)); + } else { + colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(), + HIVE_COLUMN_ORDER_DESC, HIVE_COLUMN_NULLS_FIRST)); + } } } return colList; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index e7dc08c..94be28f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -2394,6 +2394,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException List obASTExprLst = obAST.getChildren(); ASTNode obASTExpr; + ASTNode nullObASTExpr; List> vcASTTypePairs = new ArrayList>(); RowResolver inputRR = relToHiveRR.get(srcRel); RowResolver outputRR = new RowResolver(); @@ -2406,9 +2407,11 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException for (int i = 0; i < obASTExprLst.size(); i++) { // 2.1 Convert AST Expr to ExprNode obASTExpr = (ASTNode) obASTExprLst.get(i); + nullObASTExpr = (ASTNode) obASTExpr.getChild(0); + ASTNode ref = (ASTNode) nullObASTExpr.getChild(0); Map astToExprNDescMap = TypeCheckProcFactory.genExprNode( obASTExpr, new TypeCheckCtx(inputRR)); - ExprNodeDesc obExprNDesc = astToExprNDescMap.get(obASTExpr.getChild(0)); + ExprNodeDesc obExprNDesc = astToExprNDescMap.get(ref); if (obExprNDesc == null) throw new SemanticException("Invalid order by expression: " + obASTExpr.toString()); @@ -2423,18 +2426,26 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException } else { fieldIndex = srcRelRecordSz + newVCLst.size(); newVCLst.add(rnd); - vcASTTypePairs.add(new Pair((ASTNode) obASTExpr.getChild(0), - obExprNDesc.getTypeInfo())); + vcASTTypePairs.add(new Pair(ref, obExprNDesc.getTypeInfo())); } // 2.4 Determine the Direction of order by - org.apache.calcite.rel.RelFieldCollation.Direction order = RelFieldCollation.Direction.DESCENDING; + RelFieldCollation.Direction order = RelFieldCollation.Direction.DESCENDING; if (obASTExpr.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { order = RelFieldCollation.Direction.ASCENDING; } + RelFieldCollation.NullDirection nullOrder; + if (nullObASTExpr.getType() == HiveParser.TOK_NULLS_FIRST) { + nullOrder = RelFieldCollation.NullDirection.FIRST; + } else if (nullObASTExpr.getType() == HiveParser.TOK_NULLS_LAST) { + nullOrder = 
RelFieldCollation.NullDirection.LAST;
+        } else {
+          throw new SemanticException(
+              "Unexpected null ordering option: " + nullObASTExpr.getType());
+        }
         // 2.5 Add to field collations
-        fieldCollations.add(new RelFieldCollation(fieldIndex, order));
+        fieldCollations.add(new RelFieldCollation(fieldIndex, order, nullOrder));
       }
 
       // 3. Add Child Project Rel if needed, Generate Output RR, input Sel Rel
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 4c4470b..ac580f7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -52,6 +52,8 @@
 KW_EXISTS : 'EXISTS';
 KW_ASC : 'ASC';
 KW_DESC : 'DESC';
+KW_NULLS : 'NULLS';
+KW_LAST : 'LAST';
 KW_ORDER : 'ORDER';
 KW_GROUP : 'GROUP';
 KW_BY : 'BY';
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 5f14c6b..42f4090 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -81,6 +81,8 @@ TOK_GROUPING_SETS;
 TOK_GROUPING_SETS_EXPRESSION;
 TOK_HAVING;
 TOK_ORDERBY;
+TOK_NULLS_FIRST;
+TOK_NULLS_LAST;
 TOK_CLUSTERBY;
 TOK_DISTRIBUTEBY;
 TOK_SORTBY;
@@ -401,6 +403,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
     xlateMap.put("KW_ASC", "ASC");
     xlateMap.put("KW_DESC", "DESC");
+    xlateMap.put("KW_NULLS", "NULLS");
+    xlateMap.put("KW_LAST", "LAST");
     xlateMap.put("KW_ORDER", "ORDER");
     xlateMap.put("KW_BY", "BY");
     xlateMap.put("KW_GROUP", "GROUP");
@@ -2005,13 +2009,34 @@ skewedValueLocationElement
     skewedColumnValue
   | skewedColumnValuePair
   ;
-
+
+orderSpecification
+@init { pushMsg("order specification", state); }
+@after { popMsg(state); }
+    : KW_ASC | KW_DESC ;
+
+nullOrdering
+@init { pushMsg("nulls ordering", state); }
+@after { popMsg(state); }
+    : KW_NULLS KW_FIRST -> ^(TOK_NULLS_FIRST)
+    | KW_NULLS KW_LAST -> ^(TOK_NULLS_LAST)
+    ;
+
 columnNameOrder
 @init { pushMsg("column name order", state); }
 @after { popMsg(state); }
-    : identifier (asc=KW_ASC | desc=KW_DESC)?
-    -> {$desc == null}? ^(TOK_TABSORTCOLNAMEASC identifier)
-    -> ^(TOK_TABSORTCOLNAMEDESC identifier)
+    : identifier orderSpec=orderSpecification? nullSpec=nullOrdering?
+    -> {$orderSpec.tree == null && $nullSpec.tree == null}?
+       ^(TOK_TABSORTCOLNAMEASC ^(TOK_NULLS_FIRST identifier))
+    -> {$orderSpec.tree == null}?
+       ^(TOK_TABSORTCOLNAMEASC ^($nullSpec identifier))
+    -> {$nullSpec.tree == null && $orderSpec.tree.getType()==HiveParser.KW_ASC}?
+       ^(TOK_TABSORTCOLNAMEASC ^(TOK_NULLS_FIRST identifier))
+    -> {$nullSpec.tree == null && $orderSpec.tree.getType()==HiveParser.KW_DESC}?
+       ^(TOK_TABSORTCOLNAMEDESC ^(TOK_NULLS_LAST identifier))
+    -> {$orderSpec.tree.getType()==HiveParser.KW_ASC}?
+       ^(TOK_TABSORTCOLNAMEASC ^($nullSpec identifier))
+    -> ^(TOK_TABSORTCOLNAMEDESC ^($nullSpec identifier))
     ;
 
 columnNameCommentList
@@ -2030,9 +2055,18 @@ columnNameComment
 columnRefOrder
 @init { pushMsg("column order", state); }
 @after { popMsg(state); }
-    : expression (asc=KW_ASC | desc=KW_DESC)?
-    -> {$desc == null}? ^(TOK_TABSORTCOLNAMEASC expression)
-    -> ^(TOK_TABSORTCOLNAMEDESC expression)
+    : expression orderSpec=orderSpecification? nullSpec=nullOrdering?
+    -> {$orderSpec.tree == null && $nullSpec.tree == null}?
+       ^(TOK_TABSORTCOLNAMEASC ^(TOK_NULLS_FIRST expression))
+    -> {$orderSpec.tree == null}?
+       ^(TOK_TABSORTCOLNAMEASC ^($nullSpec expression))
+    -> {$nullSpec.tree == null && $orderSpec.tree.getType()==HiveParser.KW_ASC}?
+       ^(TOK_TABSORTCOLNAMEASC ^(TOK_NULLS_FIRST expression))
+    -> {$nullSpec.tree == null && $orderSpec.tree.getType()==HiveParser.KW_DESC}?
+       ^(TOK_TABSORTCOLNAMEDESC ^(TOK_NULLS_LAST expression))
+    -> {$orderSpec.tree.getType()==HiveParser.KW_ASC}?
+       ^(TOK_TABSORTCOLNAMEASC ^($nullSpec expression))
+    -> ^(TOK_TABSORTCOLNAMEDESC ^($nullSpec expression))
     ;
 
 columnNameType
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 61bd10c..a192fa7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -653,8 +653,8 @@ nonReserved
     | KW_ENABLE | KW_ESCAPED | KW_EXCLUSIVE | KW_EXPLAIN | KW_EXPORT | KW_FIELDS | KW_FILE | KW_FILEFORMAT
     | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_HOUR | KW_IDXPROPERTIES | KW_IGNORE
     | KW_INDEX | KW_INDEXES | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR
-    | KW_KEYS | KW_KEY_TYPE | KW_LIMIT | KW_OFFSET | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
-    | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUS | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE
+    | KW_KEYS | KW_KEY_TYPE | KW_LAST | KW_LIMIT | KW_OFFSET | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
+    | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUS | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_NULLS | KW_OFFLINE
     | KW_OPTION | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS | KW_PRETTY
     | KW_PRINCIPALS | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER | KW_RELOAD
     | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_REPLICATION | KW_RESTRICT | KW_REWRITE
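
With the lexer and parser changes above, every sort key now carries an explicit TOK_NULLS_FIRST or TOK_NULLS_LAST child. A minimal sketch (plain Java, illustration only, names hypothetical) of the defaulting rules the rewrite alternatives encode:

    // Sketch of the grammar's defaulting rules; not part of the patch.
    public class NullOrderingDefaults {
      enum Direction { ASC, DESC }
      enum Nulls { FIRST, LAST }

      static Nulls effective(Direction dir, Nulls explicit) {
        if (explicit != null) {
          return explicit;  // an explicit NULLS FIRST/LAST always wins
        }
        // A bare column or ASC defaults to NULLS FIRST; DESC defaults to
        // NULLS LAST, matching the TOK_NULLS_* trees emitted above.
        return (dir == null || dir == Direction.ASC) ? Nulls.FIRST : Nulls.LAST;
      }
    }
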
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
index a8980eb..ecf3cfc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
@@ -508,18 +508,27 @@ public String toString()
     DESC;
   }
 
+  public static enum NullOrder
+  {
+    NULLS_FIRST,
+    NULLS_LAST;
+  }
+
   public static class OrderExpression extends PartitionExpression
   {
     Order order;
+    NullOrder nullOrder;
 
     public OrderExpression() {
       order = Order.ASC;
+      nullOrder = NullOrder.NULLS_FIRST;
     }
 
     public OrderExpression(PartitionExpression peSpec)
     {
       super(peSpec);
       order = Order.ASC;
+      nullOrder = NullOrder.NULLS_FIRST;
     }
 
     public Order getOrder()
@@ -532,12 +541,23 @@ public void setOrder(Order order)
       this.order = order;
     }
 
+    public NullOrder getNullOrder()
+    {
+      return nullOrder;
+    }
+
+    public void setNullOrder(NullOrder nullOrder)
+    {
+      this.nullOrder = nullOrder;
+    }
+
     @Override
     public int hashCode() {
       final int prime = 31;
       int result = super.hashCode();
       result = prime * result + ((order == null) ? 0 : order.hashCode());
+      result = prime * result + ((nullOrder == null) ? 0 : nullOrder.hashCode());
       return result;
     }
 
@@ -557,13 +577,16 @@ public boolean equals(Object obj)
       if (order != other.order) {
         return false;
       }
+      if (nullOrder != other.nullOrder) {
+        return false;
+      }
       return true;
     }
 
     @Override
     public String toString()
     {
-      return String.format("%s %s", super.toString(), order);
+      return String.format("%s %s %s", super.toString(), order, nullOrder);
     }
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
index 2370ec0..9921b21 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
@@ -499,6 +499,7 @@ private OrderExpressionDef translate(ShapeDetails inpShape,
       throws SemanticException {
     OrderExpressionDef oexpDef = new OrderExpressionDef();
     oexpDef.setOrder(oExpr.getOrder());
+    oexpDef.setNullOrder(oExpr.getNullOrder());
     try {
       PTFExpressionDef expDef = buildExpressionDef(inpShape, oExpr.getExpression());
       oexpDef.setExpressionTreeString(expDef.getExpressionTreeString());
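
The new NullOrder enum rides along with Order on every OrderExpression and defaults to NULLS_FIRST, so downstream translation code can rely on it being non-null. A small usage sketch, assuming hive-exec on the classpath:

    import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec;
    import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder;
    import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order;

    public class OrderExpressionSketch {
      public static void main(String[] args) {
        PTFInvocationSpec.OrderExpression oe = new PTFInvocationSpec.OrderExpression();
        // Defaults set by the constructors above: ASC and NULLS_FIRST.
        System.out.println(oe.getOrder() + " " + oe.getNullOrder());
        oe.setOrder(Order.DESC);
        oe.setNullOrder(NullOrder.NULLS_LAST);
        System.out.println(oe.getOrder() + " " + oe.getNullOrder());
      }
    }
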
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 8a06582..85d1985 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6328,6 +6328,7 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb,
     ArrayList partnCols = new ArrayList();
     ArrayList sortCols = new ArrayList();
     ArrayList sortOrders = new ArrayList();
+    ArrayList nullPositions = new ArrayList();
     boolean multiFileSpray = false;
     int numFiles = 1;
     int totalFiles = 1;
@@ -6345,6 +6346,7 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb,
         (dest_tab.getSortCols().size() > 0)) {
       sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
       sortOrders = getSortOrders(dest, qb, dest_tab, input);
+      nullPositions = getNullPositions(dest, qb, dest_tab, input);
       if (!enforceBucketing && !dest_tab.isIndexTable()) {
         throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
       } else {
@@ -6382,8 +6384,12 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb,
       for (int sortOrder : sortOrders) {
         order.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? '+' : '-');
       }
-      input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), maxReducers,
-        (AcidUtils.isAcidTable(dest_tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID));
+      StringBuilder nullPosition = new StringBuilder();
+      for (int pos : nullPositions) {
+        nullPosition.append(pos == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST ? 'a' : 'z');
+      }
+      input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullPosition.toString(),
+        maxReducers, (AcidUtils.isAcidTable(dest_tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID));
       reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0));
       ctx.setMultiFileSpray(multiFileSpray);
       ctx.setNumFiles(numFiles);
@@ -7344,6 +7350,23 @@ private Operator genLimitMapRedPlan(String dest, QB qb, Operator input,
     return orders;
   }
 
+  private ArrayList getNullPositions(String dest, QB qb, Table tab, Operator input)
+      throws SemanticException {
+    List tabSortCols = tab.getSortCols();
+    List tabCols = tab.getCols();
+
+    ArrayList orders = new ArrayList();
+    for (Order sortCol : tabSortCols) {
+      for (FieldSchema tabCol : tabCols) {
+        if (sortCol.getCol().equals(tabCol.getName())) {
+          orders.add(sortCol.getNullOrder());
+          break;
+        }
+      }
+    }
+    return orders;
+  }
+
   private Operator genReduceSinkPlan(String dest, QB qb, Operator input,
       int numReducers, boolean hasOrderBy) throws SemanticException {
 
@@ -7384,6 +7407,7 @@ private Operator genReduceSinkPlan(String dest, QB qb, Operator input,
     }
     ArrayList sortCols = new ArrayList();
     StringBuilder order = new StringBuilder();
+    StringBuilder nullPosition = new StringBuilder();
     if (sortExprs != null) {
       int ccount = sortExprs.getChildCount();
       for (int i = 0; i < ccount; ++i) {
@@ -7393,20 +7417,40 @@ private Operator genReduceSinkPlan(String dest, QB qb, Operator input,
           // SortBy ASC
           order.append("+");
           cl = (ASTNode) cl.getChild(0);
+          if (cl.getType() == HiveParser.TOK_NULLS_FIRST) {
+            nullPosition.append("a");
+          } else if (cl.getType() == HiveParser.TOK_NULLS_LAST) {
+            nullPosition.append("z");
+          } else {
+            throw new SemanticException(
+                "Unexpected null ordering option: " + cl.getType());
+          }
+          cl = (ASTNode) cl.getChild(0);
         } else if (cl.getType() == HiveParser.TOK_TABSORTCOLNAMEDESC) {
           // SortBy DESC
           order.append("-");
           cl = (ASTNode) cl.getChild(0);
+          if (cl.getType() == HiveParser.TOK_NULLS_FIRST) {
+            nullPosition.append("a");
+          } else if (cl.getType() == HiveParser.TOK_NULLS_LAST) {
+            nullPosition.append("z");
+          } else {
+            throw new SemanticException(
+                "Unexpected null ordering option: " + cl.getType());
+          }
+          cl = (ASTNode) cl.getChild(0);
         } else {
           // ClusterBy
           order.append("+");
+          nullPosition.append("a");
         }
         ExprNodeDesc exprNode = genExprNodeDesc(cl, inputRR);
         sortCols.add(exprNode);
       }
     }
     Operator result = genReduceSinkPlan(
-        input, partCols, sortCols, order.toString(), numReducers, Operation.NOT_ACID);
+        input, partCols, sortCols, order.toString(), nullPosition.toString(),
+        numReducers, Operation.NOT_ACID);
     if (result.getParentOperators().size() == 1 &&
         result.getParentOperators().get(0) instanceof ReduceSinkOperator) {
       ((ReduceSinkOperator) result.getParentOperators().get(0))
@@ -7418,7 +7462,8 @@ private Operator genReduceSinkPlan(String dest, QB qb, Operator input,
   @SuppressWarnings("nls")
   private Operator genReduceSinkPlan(Operator input, ArrayList partitionCols,
       ArrayList sortCols,
-      String sortOrder, int numReducers, AcidUtils.Operation acidOp) throws SemanticException {
+      String sortOrder, String nullPosition, int numReducers, AcidUtils.Operation acidOp)
+      throws SemanticException {
 
     RowResolver inputRR = opParseCtx.get(input).getRowResolver();
 
@@ -7485,7 +7530,7 @@ private Operator genReduceSinkPlan(Operator input,
     dummy.setParentOperators(null);
 
     ReduceSinkDesc rsdesc = PlanUtils.getReduceSinkDesc(sortCols, valueCols, outputColumns,
-        false, -1, partitionCols, sortOrder, numReducers, acidOp);
+        false, -1, partitionCols, sortOrder, nullPosition, numReducers, acidOp);
     Operator interim = putOpInsertMap(OperatorFactory.getAndMakeChild(rsdesc,
         new RowSchema(rsRR.getColumnInfos()), input), rsRR);
 
@@ -11522,7 +11567,7 @@ private void processPositionAlias(ASTNode ast) throws SemanticException {
         }
       }
       for (int child_pos = 0; child_pos < orderbyNode.getChildCount(); ++child_pos) {
-        ASTNode colNode = (ASTNode) orderbyNode.getChild(child_pos);
+        ASTNode colNode = (ASTNode) orderbyNode.getChild(child_pos).getChild(0);
         ASTNode node = (ASTNode) colNode.getChild(0);
         if (node.getToken().getType() == HiveParser.Number) {
           if( isByPos ) {
@@ -11854,13 +11899,20 @@ private OrderSpec processOrderSpec(ASTNode sortNode) {
     int exprCnt = sortNode.getChildCount();
     for(int i=0; i < exprCnt; i++) {
       OrderExpression exprSpec = new OrderExpression();
-      exprSpec.setExpression((ASTNode) sortNode.getChild(i).getChild(0));
-      if ( sortNode.getChild(i).getType() == HiveParser.TOK_TABSORTCOLNAMEASC ) {
+      ASTNode orderSpec = (ASTNode) sortNode.getChild(i);
+      ASTNode nullOrderSpec = (ASTNode) orderSpec.getChild(0);
+      exprSpec.setExpression((ASTNode) nullOrderSpec.getChild(0));
+      if ( orderSpec.getType() == HiveParser.TOK_TABSORTCOLNAMEASC ) {
         exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.ASC);
       }
       else {
         exprSpec.setOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC);
       }
+      if ( nullOrderSpec.getType() == HiveParser.TOK_NULLS_FIRST ) {
+        exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_FIRST);
+      }
+      else {
+        exprSpec.setNullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder.NULLS_LAST);
+      }
       oSpec.addExpression(exprSpec);
     }
     return oSpec;
@@ -12198,7 +12250,8 @@ void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef,
       RowResolver inputRR,
       ArrayList partCols,
       ArrayList orderCols,
-      StringBuilder orderString) throws SemanticException {
+      StringBuilder orderString,
+      StringBuilder nullPositionString) throws SemanticException {
 
     List partColList = tabDef.getPartition().getExpressions();
 
@@ -12208,6 +12261,7 @@ void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef,
       partCols.add(exprNode);
       orderCols.add(exprNode);
       orderString.append('+');
+      nullPositionString.append('a');
     }
   }
 
@@ -12222,13 +12276,16 @@ void buildPTFReduceSinkDetails(PartitionedTableFunctionDef tabDef,
     for (int i = 0; i < orderColList.size(); i++) {
       OrderExpressionDef colDef = orderColList.get(i);
       char orderChar = colDef.getOrder() == PTFInvocationSpec.Order.ASC ? '+' : '-';
+      char nullPositionChar = colDef.getNullOrder() == PTFInvocationSpec.NullOrder.NULLS_FIRST ? 'a' : 'z';
       int index = ExprNodeDescUtils.indexOf(colDef.getExprNode(), orderCols);
       if (index >= 0) {
         orderString.setCharAt(index, orderChar);
+        nullPositionString.setCharAt(index, nullPositionChar);
         continue;
       }
       orderCols.add(colDef.getExprNode());
       orderString.append(orderChar);
+      nullPositionString.append(nullPositionChar);
     }
   }
 
@@ -12271,6 +12328,7 @@ private Operator genPTFPlanForComponentQuery(PTFInvocationSpec ptfQSpec, Operato
       ArrayList partCols = new ArrayList();
       ArrayList orderCols = new ArrayList();
       StringBuilder orderString = new StringBuilder();
+      StringBuilder nullPositionString = new StringBuilder();
 
       /*
        * Use the input RR of TableScanOperator in case there is no map-side
@@ -12278,8 +12336,9 @@ private Operator genPTFPlanForComponentQuery(PTFInvocationSpec ptfQSpec, Operato
        * If the parent of ReduceSinkOperator is PTFOperator, use it's
        * output RR.
        */
-      buildPTFReduceSinkDetails(tabDef, rr, partCols, orderCols, orderString);
-      input = genReduceSinkPlan(input, partCols, orderCols, orderString.toString(), -1, Operation.NOT_ACID);
+      buildPTFReduceSinkDetails(tabDef, rr, partCols, orderCols, orderString, nullPositionString);
+      input = genReduceSinkPlan(input, partCols, orderCols, orderString.toString(),
+          nullPositionString.toString(), -1, Operation.NOT_ACID);
     }
 
     /*
@@ -12337,6 +12396,7 @@ private Operator genReduceSinkPlanForWindowing(WindowingSpec spec,
     ArrayList partCols = new ArrayList();
     ArrayList orderCols = new ArrayList();
     StringBuilder order = new StringBuilder();
+    StringBuilder nullPosition = new StringBuilder();
 
     for (PartitionExpression partCol : spec.getQueryPartitionSpec().getExpressions()) {
       ExprNodeDesc partExpr = genExprNodeDesc(partCol.getExpression(), inputRR);
@@ -12344,6 +12404,7 @@ private Operator genReduceSinkPlanForWindowing(WindowingSpec spec,
         partCols.add(partExpr);
         orderCols.add(partExpr);
         order.append('+');
+        nullPosition.append('a');
       }
     }
 
@@ -12351,17 +12412,21 @@ private Operator genReduceSinkPlanForWindowing(WindowingSpec spec,
       for (OrderExpression orderCol : spec.getQueryOrderSpec().getExpressions()) {
         ExprNodeDesc orderExpr = genExprNodeDesc(orderCol.getExpression(), inputRR);
         char orderChar = orderCol.getOrder() == PTFInvocationSpec.Order.ASC ? '+' : '-';
+        char nullPositionChar = orderCol.getNullOrder() == PTFInvocationSpec.NullOrder.NULLS_FIRST ? 'a' : 'z';
        int index = ExprNodeDescUtils.indexOf(orderExpr, orderCols);
         if (index >= 0) {
           order.setCharAt(index, orderChar);
+          nullPosition.setCharAt(index, nullPositionChar);
           continue;
         }
         orderCols.add(genExprNodeDesc(orderCol.getExpression(), inputRR));
         order.append(orderChar);
+        nullPosition.append(nullPositionChar);
       }
     }
 
-    return genReduceSinkPlan(input, partCols, orderCols, order.toString(), -1, Operation.NOT_ACID);
+    return genReduceSinkPlan(input, partCols, orderCols, order.toString(), nullPosition.toString(),
+        -1, Operation.NOT_ACID);
   }
 
   public static ArrayList parseSelect(String selectExprStr)
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 598520c..45dfd27 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -735,6 +735,8 @@ public ColumnExprProcessor getColumnExprProcessor() {
     windowingTokens.add(HiveParser.KW_CURRENT);
     windowingTokens.add(HiveParser.TOK_TABSORTCOLNAMEASC);
     windowingTokens.add(HiveParser.TOK_TABSORTCOLNAMEDESC);
+    windowingTokens.add(HiveParser.TOK_NULLS_FIRST);
+    windowingTokens.add(HiveParser.TOK_NULLS_LAST);
   }
 
   protected static boolean isRedundantConversionFunction(ASTNode expr,
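
Throughout the analyzer, the null position travels as a string parallel to the existing sort-order string: one character per key column, '+'/'-' for direction and 'a'/'z' for nulls first/last. An illustrative sketch of that convention (plain Java, not patch code):

    public class KeyOrderStrings {
      public static void main(String[] args) {
        String order = "+-";         // first key ASC, second key DESC
        String nullPosition = "az";  // 'a' = NULLS FIRST, 'z' = NULLS LAST
        assert order.length() == nullPosition.length();
        for (int i = 0; i < order.length(); i++) {
          System.out.printf("key %d: %s %s%n", i,
              order.charAt(i) == '+' ? "ASC" : "DESC",
              nullPosition.charAt(i) == 'a' ? "NULLS FIRST" : "NULLS LAST");
        }
      }
    }
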
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 04d26f3..3dfd12e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -23,15 +23,12 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -39,7 +36,6 @@
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -66,8 +62,8 @@
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
 import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
 import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -76,6 +72,8 @@
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * PlanUtils.
@@ -389,7 +387,7 @@ public static TableDesc getDefaultTableDesc(String separatorCode) {
    * Generate the table descriptor for reduce key.
    */
   public static TableDesc getReduceKeyTableDesc(List fieldSchemas,
-      String order) {
+      String order, String nullPosition) {
     return new TableDesc(
         SequenceFileInputFormat.class, SequenceFileOutputFormat.class,
         Utilities.makeProperties(serdeConstants.LIST_COLUMNS, MetaStoreUtils
@@ -397,6 +395,7 @@ public static TableDesc getReduceKeyTableDesc(List fieldSchemas,
         serdeConstants.LIST_COLUMN_TYPES, MetaStoreUtils
         .getColumnTypesFromFieldSchema(fieldSchemas),
         serdeConstants.SERIALIZATION_SORT_ORDER, order,
+        serdeConstants.SERIALIZATION_NULL_POSITION, nullPosition,
         serdeConstants.SERIALIZATION_LIB, BinarySortableSerDe.class.getName()));
   }
 
@@ -411,8 +410,10 @@ public static TableDesc getMapJoinKeyTableDesc(Configuration conf,
       // be broadcast (instead of partitioned). As a consequence we use
       // a different SerDe than in the MR mapjoin case.
       StringBuilder order = new StringBuilder();
+      StringBuilder nullPosition = new StringBuilder();
       for (FieldSchema f: fieldSchemas) {
         order.append("+");
+        nullPosition.append("a");
       }
       return new TableDesc(
           SequenceFileInputFormat.class, SequenceFileOutputFormat.class,
@@ -421,6 +422,7 @@ public static TableDesc getMapJoinKeyTableDesc(Configuration conf,
           serdeConstants.LIST_COLUMN_TYPES, MetaStoreUtils
           .getColumnTypesFromFieldSchema(fieldSchemas),
           serdeConstants.SERIALIZATION_SORT_ORDER, order.toString(),
+          serdeConstants.SERIALIZATION_NULL_POSITION, nullPosition.toString(),
          serdeConstants.SERIALIZATION_LIB, BinarySortableSerDe.class.getName()));
     } else {
       return new TableDesc(SequenceFileInputFormat.class,
@@ -606,15 +608,15 @@ public int compare(FieldSchema o1, FieldSchema o2) {
   public static ReduceSinkDesc getReduceSinkDesc(
       ArrayList keyCols, ArrayList valueCols,
       List outputColumnNames, boolean includeKeyCols, int tag,
-      ArrayList partitionCols, String order, int numReducers,
-      AcidUtils.Operation writeType) {
+      ArrayList partitionCols, String order, String nullPosition,
+      int numReducers, AcidUtils.Operation writeType) {
     return getReduceSinkDesc(keyCols, keyCols.size(), valueCols,
         new ArrayList>(),
         includeKeyCols ? outputColumnNames.subList(0, keyCols.size()) :
           new ArrayList(),
         includeKeyCols ?
         outputColumnNames.subList(keyCols.size(), outputColumnNames.size())
             : outputColumnNames,
-        includeKeyCols, tag, partitionCols, order, numReducers, writeType);
+        includeKeyCols, tag, partitionCols, order, nullPosition, numReducers, writeType);
   }
 
   /**
@@ -651,8 +653,8 @@ public static ReduceSinkDesc getReduceSinkDesc(
       List outputKeyColumnNames,
       List outputValueColumnNames,
       boolean includeKeyCols, int tag,
-      ArrayList partitionCols, String order, int numReducers,
-      AcidUtils.Operation writeType) {
+      ArrayList partitionCols, String order, String nullPosition,
+      int numReducers, AcidUtils.Operation writeType) {
     TableDesc keyTable = null;
     TableDesc valueTable = null;
     ArrayList outputKeyCols = new ArrayList();
@@ -663,11 +665,14 @@ public static ReduceSinkDesc getReduceSinkDesc(
       if (order.length() < outputKeyColumnNames.size()) {
         order = order + "+";
       }
-      keyTable = getReduceKeyTableDesc(keySchema, order);
+      if (nullPosition.length() < outputKeyColumnNames.size()) {
+        nullPosition = nullPosition + "a";
+      }
+      keyTable = getReduceKeyTableDesc(keySchema, order, nullPosition);
       outputKeyCols.addAll(outputKeyColumnNames);
     } else {
       keyTable = getReduceKeyTableDesc(getFieldSchemasFromColumnList(
-          keyCols, "reducesinkkey"),order);
+          keyCols, "reducesinkkey"), order, nullPosition);
       for (int i = 0; i < keyCols.size(); i++) {
         outputKeyCols.add("reducesinkkey" + i);
       }
@@ -764,12 +769,14 @@ public static ReduceSinkDesc getReduceSinkDesc(
     }
 
     StringBuilder order = new StringBuilder();
+    StringBuilder nullPosition = new StringBuilder();
     for (int i = 0; i < keyCols.size(); i++) {
       order.append("+");
+      nullPosition.append("a");
     }
     return getReduceSinkDesc(keyCols, numKeys, valueCols, distinctColIndices,
         outputKeyColumnNames, outputValueColumnNames, includeKey, tag,
-        partitionCols, order.toString(), numReducers, writeType);
+        partitionCols, order.toString(), nullPosition.toString(), numReducers, writeType);
   }
 
   /**
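
The null-position string lands in the reduce key's TableDesc next to the sort order, where BinarySortableSerDe can pick it up when serializing keys. A sketch of the resulting properties, assuming hive-exec and hive-serde on the classpath (SERIALIZATION_NULL_POSITION is the serdeConstants entry this patch introduces):

    import java.util.Properties;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;

    public class ReduceKeyProps {
      public static void main(String[] args) {
        // Two key columns: key ASC NULLS FIRST ('a'), value DESC NULLS LAST ('z').
        Properties p = Utilities.makeProperties(
            serdeConstants.LIST_COLUMNS, "key,value",
            serdeConstants.LIST_COLUMN_TYPES, "string,string",
            serdeConstants.SERIALIZATION_SORT_ORDER, "+-",
            serdeConstants.SERIALIZATION_NULL_POSITION, "az",
            serdeConstants.SERIALIZATION_LIB, BinarySortableSerDe.class.getName());
        System.out.println(p);
      }
    }
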
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
index 41d9ffe..a27d6b3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
@@ -22,10 +22,10 @@
 import java.util.EnumSet;
 import java.util.List;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
@@ -380,6 +380,24 @@ public boolean isOrdering() {
     return false;
   }
 
+  /**
+   * Returns the null ordering of the key columns.
+   *
+   * @return null, which means the default for all key columns, or a String
+   *         of the same length as the key columns, consisting only of "a"
+   *         (nulls first) and "z" (nulls last).
+   */
+  public String getNullPosition() {
+    return keySerializeInfo.getProperties().getProperty(
+        org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_POSITION);
+  }
+
+  public void setNullPosition(String nullPositionStr) {
+    keySerializeInfo.getProperties().setProperty(
+        org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_NULL_POSITION,
+        nullPositionStr);
+  }
+
   public List> getDistinctColumnIndices() {
     return distinctColumnIndices;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/OrderExpressionDef.java ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/OrderExpressionDef.java
index e367d13..827911a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/OrderExpressionDef.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/OrderExpressionDef.java
@@ -18,15 +18,18 @@
 
 package org.apache.hadoop.hive.ql.plan.ptf;
 
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order;
 
 public class OrderExpressionDef extends PTFExpressionDef {
   private Order order;
+  private NullOrder nullOrder;
 
   public OrderExpressionDef() {}
   public OrderExpressionDef(PTFExpressionDef e) {
     super(e);
     order = Order.ASC;
+    nullOrder = NullOrder.NULLS_FIRST;
   }
 
   public Order getOrder() {
@@ -36,5 +39,13 @@ public Order getOrder() {
   public void setOrder(Order order) {
     this.order = order;
   }
-}
 
+  public NullOrder getNullOrder() {
+    return nullOrder;
+  }
+
+  public void setNullOrder(NullOrder nullOrder) {
+    this.nullOrder = nullOrder;
+  }
+
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PartitionedTableFunctionDef.java ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PartitionedTableFunctionDef.java
index 2a8b1c0..54b0fb8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PartitionedTableFunctionDef.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PartitionedTableFunctionDef.java
@@ -122,8 +122,16 @@ public String getOrderExplain() {
         builder.append(", ");
       }
       builder.append(expression.getExprNode().getExprString());
-      if (expression.getOrder() == PTFInvocationSpec.Order.DESC) {
-        builder.append("(DESC)");
+      builder.append(" ");
+      if (expression.getOrder() == PTFInvocationSpec.Order.ASC) {
+        builder.append("ASC ");
+      } else {
+        builder.append("DESC ");
+      }
+      if (expression.getNullOrder() == PTFInvocationSpec.NullOrder.NULLS_FIRST) {
+        builder.append("NULLS FIRST");
+      } else {
+        builder.append("NULLS LAST");
       }
     }
     return builder.toString();
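
NullOrder has the same semantics as the JDK's null-friendly comparator wrappers, which makes the intent of the windowing changes below easy to check in isolation (plain-JDK illustration, not Hive code):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class NullOrderDemo {
      public static void main(String[] args) {
        List<Integer> vals = Arrays.asList(2, null, 1, 3);
        // ASC NULLS FIRST
        vals.sort(Comparator.nullsFirst(Comparator.<Integer>naturalOrder()));
        System.out.println(vals); // [null, 1, 2, 3]
        // DESC NULLS LAST
        vals.sort(Comparator.nullsLast(Comparator.<Integer>reverseOrder()));
        System.out.println(vals); // [3, 2, 1, null]
      }
    }
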
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
index 2c076f50..2ac4039 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
@@ -27,19 +27,18 @@
 import java.util.Map;
 
 import org.apache.commons.lang.ArrayUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.PTFOperator;
 import org.apache.hadoop.hive.ql.exec.PTFPartition;
-import org.apache.hadoop.hive.ql.exec.WindowFunctionInfo;
 import org.apache.hadoop.hive.ql.exec.PTFPartition.PTFPartitionIterator;
 import org.apache.hadoop.hive.ql.exec.PTFRollingPartition;
+import org.apache.hadoop.hive.ql.exec.WindowFunctionInfo;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.BoundarySpec;
@@ -62,6 +61,8 @@
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings("deprecation")
 public class WindowingTableFunction extends TableFunctionEvaluator {
@@ -110,6 +111,7 @@ public void execute(PTFPartitionIterator pItr, PTFPartition outP) throws
     WindowTableFunctionDef wTFnDef = (WindowTableFunctionDef) getTableDef();
     Order order = wTFnDef.getOrder().getExpressions().get(0).getOrder();
+    NullOrder nullOrder = wTFnDef.getOrder().getExpressions().get(0).getNullOrder();
 
     for(WindowFunctionDef wFn : wTFnDef.getWindowFunctions()) {
       boolean processWindow = processWindow(wFn);
@@ -121,7 +123,7 @@ public void execute(PTFPartitionIterator pItr, PTFPartition outP) throws
         }
         oColumns.add((List)out);
       } else {
-        oColumns.add(executeFnwithWindow(getQueryDef(), wFn, iPart, order));
+        oColumns.add(executeFnwithWindow(getQueryDef(), wFn, iPart, order, nullOrder));
       }
     }
 
@@ -421,7 +423,7 @@ public void startPartition() throws HiveException {
         int rowToProcess = streamingState.rollingPart.rowToProcess(wFn.getWindowFrame());
         if (rowToProcess >= 0) {
           Range rng = getRange(wFn, rowToProcess, streamingState.rollingPart,
-              streamingState.order);
+              streamingState.order, streamingState.nullOrder);
           PTFPartitionIterator rItr = rng.iterator();
           PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
           Object out = evaluateWindowFunction(wFn, rItr);
@@ -499,7 +501,7 @@ public void startPartition() throws HiveException {
         int rowToProcess = streamingState.rollingPart.size() - numRowsRemaining;
         if (rowToProcess >= 0) {
           Range rng = getRange(wFn, rowToProcess, streamingState.rollingPart,
-              streamingState.order);
+              streamingState.order, streamingState.nullOrder);
           PTFPartitionIterator rItr = rng.iterator();
           PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
           Object out = evaluateWindowFunction(wFn, rItr);
@@ -659,11 +661,12 @@ public boolean carryForwardNames() {
   ArrayList executeFnwithWindow(PTFDesc ptfDesc,
       WindowFunctionDef wFnDef,
       PTFPartition iPart,
-      Order order)
+      Order order,
+      NullOrder nullOrder)
     throws HiveException {
     ArrayList vals = new ArrayList();
     for(int i=0; i < iPart.size(); i++) {
-      Range rng = getRange(wFnDef, i, iPart, order);
+      Range rng = getRange(wFnDef, i, iPart, order, nullOrder);
       PTFPartitionIterator rItr = rng.iterator();
       PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
       Object out = evaluateWindowFunction(wFnDef, rItr);
@@ -672,7 +675,7 @@ public boolean carryForwardNames() {
     return vals;
   }
 
-  private Range getRange(WindowFunctionDef wFnDef, int currRow, PTFPartition p, Order order) throws HiveException
+  private Range getRange(WindowFunctionDef wFnDef, int currRow, PTFPartition p, Order order, NullOrder nullOrder) throws HiveException
   {
     BoundaryDef startB = wFnDef.getWindowFrame().getStart();
     BoundaryDef endB = wFnDef.getWindowFrame().getEnd();
@@ -691,10 +694,10 @@ private Range getRange(WindowFunctionDef wFnDef, int currRow, PTFPartition p, Or
     else {
       ValueBoundaryScanner vbs;
       if ( startB instanceof ValueBoundaryDef ) {
-        vbs = ValueBoundaryScanner.getScanner((ValueBoundaryDef)startB, order);
+        vbs = ValueBoundaryScanner.getScanner((ValueBoundaryDef)startB, order, nullOrder);
       }
       else {
-        vbs = ValueBoundaryScanner.getScanner((ValueBoundaryDef)endB, order);
+        vbs = ValueBoundaryScanner.getScanner((ValueBoundaryDef)endB, order, nullOrder);
       }
       vbs.reset(startB);
       start = vbs.computeStart(currRow, p);
@@ -775,12 +778,14 @@ public Range(int start, int end, PTFPartition p) {
     BoundaryDef bndDef;
     Order order;
+    NullOrder nullOrder;
     PTFExpressionDef expressionDef;
 
-    public ValueBoundaryScanner(BoundaryDef bndDef, Order order, PTFExpressionDef expressionDef)
+    public ValueBoundaryScanner(BoundaryDef bndDef, Order order, NullOrder nullOrder, PTFExpressionDef expressionDef)
     {
       this.bndDef = bndDef;
       this.order = order;
+      this.nullOrder = nullOrder;
       this.expressionDef = expressionDef;
     }
 
@@ -1135,7 +1140,7 @@ public Object computeValue(Object row) throws HiveException {
 
     @SuppressWarnings("incomplete-switch")
-    public static ValueBoundaryScanner getScanner(ValueBoundaryDef vbDef, Order order)
+    public static ValueBoundaryScanner getScanner(ValueBoundaryDef vbDef, Order order, NullOrder nullOrder)
         throws HiveException {
       PrimitiveObjectInspector pOI = (PrimitiveObjectInspector) vbDef.getOI();
       switch(pOI.getPrimitiveCategory()) {
@@ -1144,16 +1149,16 @@ public static ValueBoundaryScanner getScanner(ValueBoundaryDef vbDef, Order orde
       case LONG:
       case SHORT:
       case TIMESTAMP:
-        return new LongValueBoundaryScanner(vbDef, order, vbDef.getExpressionDef());
+        return new LongValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
       case DOUBLE:
       case FLOAT:
-        return new DoubleValueBoundaryScanner(vbDef, order, vbDef.getExpressionDef());
+        return new DoubleValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
       case DECIMAL:
-        return new HiveDecimalValueBoundaryScanner(vbDef, order, vbDef.getExpressionDef());
+        return new HiveDecimalValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
       case DATE:
-        return new DateValueBoundaryScanner(vbDef, order, vbDef.getExpressionDef());
+        return new DateValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
       case STRING:
-        return new StringValueBoundaryScanner(vbDef, order, vbDef.getExpressionDef());
+        return new StringValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
       }
       throw new HiveException(
           String.format("Internal Error: attempt to setup a Window for datatype %s",
@@ -1162,9 +1167,9 @@ public static ValueBoundaryScanner getScanner(ValueBoundaryDef vbDef, Order orde
   }
 
   public static class LongValueBoundaryScanner extends ValueBoundaryScanner {
-    public LongValueBoundaryScanner(BoundaryDef bndDef, Order order,
+    public LongValueBoundaryScanner(BoundaryDef bndDef, Order order, NullOrder nullOrder,
         PTFExpressionDef expressionDef) {
-      super(bndDef,order,expressionDef);
+      super(bndDef,order,nullOrder,expressionDef);
     }
 
     @Override
@@ -1196,8 +1201,8 @@ public boolean isEqual(Object v1, Object v2) {
 
   public static class DoubleValueBoundaryScanner extends ValueBoundaryScanner {
     public DoubleValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        PTFExpressionDef expressionDef) {
-      super(bndDef,order,expressionDef);
+        NullOrder nullOrder, PTFExpressionDef expressionDef) {
+      super(bndDef,order,nullOrder,expressionDef);
     }
 
     @Override
@@ -1229,8 +1234,8 @@ public boolean isEqual(Object v1, Object v2) {
 
   public static class HiveDecimalValueBoundaryScanner extends ValueBoundaryScanner {
     public HiveDecimalValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        PTFExpressionDef expressionDef) {
-      super(bndDef,order,expressionDef);
+        NullOrder nullOrder, PTFExpressionDef expressionDef) {
+      super(bndDef,order,nullOrder,expressionDef);
     }
 
     @Override
@@ -1262,8 +1267,8 @@ public boolean isEqual(Object v1, Object v2) {
 
   public static class DateValueBoundaryScanner extends ValueBoundaryScanner {
     public DateValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        PTFExpressionDef expressionDef) {
-      super(bndDef,order,expressionDef);
+        NullOrder nullOrder, PTFExpressionDef expressionDef) {
+      super(bndDef,order,nullOrder,expressionDef);
     }
 
     @Override
@@ -1290,8 +1295,8 @@ public boolean isEqual(Object v1, Object v2) {
 
   public static class StringValueBoundaryScanner extends ValueBoundaryScanner {
     public StringValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        PTFExpressionDef expressionDef) {
-      super(bndDef,order,expressionDef);
+        NullOrder nullOrder, PTFExpressionDef expressionDef) {
+      super(bndDef,order,nullOrder,expressionDef);
     }
 
     @Override
@@ -1347,6 +1352,7 @@ public int size() {
     int[] wFnsToProcess;
     WindowTableFunctionDef wTFnDef;
     Order order;
+    NullOrder nullOrder;
     PTFDesc ptfDesc;
     StructObjectInspector inputOI;
     AggregationBuffer[] aggBuffers;
@@ -1362,6 +1368,7 @@ public int size() {
       this.currIdx = 0;
       wTFnDef = (WindowTableFunctionDef) getTableDef();
       order = wTFnDef.getOrder().getExpressions().get(0).getOrder();
+      nullOrder = wTFnDef.getOrder().getExpressions().get(0).getNullOrder();
       ptfDesc = getQueryDef();
       inputOI = iPart.getOutputOI();
 
@@ -1416,7 +1423,7 @@ public Object next() {
           out = ObjectInspectorUtils.copyToStandardObject(out, wFn.getOI());
           output.set(j, out);
         } else {
-          Range rng = getRange(wFn, currIdx, iPart, order);
+          Range rng = getRange(wFn, currIdx, iPart, order, nullOrder);
           PTFPartitionIterator rItr = rng.iterator();
           PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
           output.set(j, evaluateWindowFunction(wFn, rItr));
@@ -1453,6 +1460,7 @@ public void remove() {
     AggregationBuffer[] aggBuffers;
     Object[][] funcArgs;
     Order order;
+    NullOrder nullOrder;
     RankLimit rnkLimit;
 
     @SuppressWarnings("unchecked")
@@ -1467,6 +1475,7 @@ public void remove() {
           precedingSpan, followingSpan);
 
       order = tabDef.getOrder().getExpressions().get(0).getOrder();
+      nullOrder = tabDef.getOrder().getExpressions().get(0).getNullOrder();
 
       int numFns = tabDef.getWindowFunctions().size();
       fnOutputs = new ArrayList[numFns];
diff --git ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index cf7eb70..7f6430f 100644
--- ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -224,7 +224,7 @@ public void inputSplitNullBase() throws Exception {
   @Test
   public void sortedTable() throws Exception {
     List sortCols = new ArrayList(1);
-    sortCols.add(new Order("b", 1));
+    sortCols.add(new Order("b", 1, 0));
     Table t = newTable("default", "st", false, new HashMap(),
         sortCols, false);
 
@@ -249,7 +249,7 @@ public void sortedTable() throws Exception {
   @Test
   public void sortedPartition() throws Exception {
     List sortCols = new ArrayList(1);
-    sortCols.add(new Order("b", 1));
+    sortCols.add(new Order("b", 1, 0));
     Table t = newTable("default", "sp", true, new HashMap(), sortCols, false);
     Partition p = newPartition(t, "today", sortCols);
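
The metastore Order objects built in tests now take a third nullOrder argument. A sketch, assuming (per the q.out defaults further below, where order:1 pairs with nullOrder:0 and order:0 with nullOrder:1) that 0 means nulls first and 1 means nulls last:

    import org.apache.hadoop.hive.metastore.api.Order;

    public class SortColSketch {
      public static void main(String[] args) {
        Order ascNullsFirst = new Order("b", 1, 0);  // b ASC NULLS FIRST
        Order descNullsLast = new Order("b", 0, 1);  // b DESC NULLS LAST
        System.out.println(ascNullsFirst + " / " + descNullsLast);
      }
    }
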
diff --git ql/src/test/queries/clientpositive/order_null.q ql/src/test/queries/clientpositive/order_null.q
new file mode 100644
index 0000000..61a2c16
--- /dev/null
+++ ql/src/test/queries/clientpositive/order_null.q
@@ -0,0 +1,29 @@
+create table src_null (a int, b string);
+insert into src_null values (1, 'A');
+insert into src_null values (null, null);
+insert into src_null values (2, 'B');
+insert into src_null values (2, 'A');
+insert into src_null values (2, null);
+insert into src_null values (3, null);
+
+SELECT x.* FROM src_null x ORDER BY a asc;
+
+SELECT x.* FROM src_null x ORDER BY a desc;
+
+SELECT x.* FROM src_null x ORDER BY b asc;
+
+SELECT x.* FROM src_null x ORDER BY b desc;
+
+SELECT x.* FROM src_null x ORDER BY a asc nulls first;
+
+SELECT x.* FROM src_null x ORDER BY a desc nulls first;
+
+SELECT x.* FROM src_null x ORDER BY b asc nulls last;
+
+SELECT x.* FROM src_null x ORDER BY b desc nulls last;
+
+SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc;
+
+SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last;
+
+SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last;
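
For the multi-key queries in this test, the expected ordering is what a chained JDK comparator would produce; e.g. ORDER BY a asc nulls last, b desc (where the bare desc defaults to nulls last) corresponds to the sketch below. Illustration only; Row is a hypothetical stand-in for a src_null row:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class MultiKeyNullOrderDemo {
      static final class Row {
        final Integer a; final String b;
        Row(Integer a, String b) { this.a = a; this.b = b; }
        public String toString() { return "(" + a + "," + b + ")"; }
      }

      public static void main(String[] args) {
        List<Row> rows = new ArrayList<>();
        rows.add(new Row(2, "A"));
        rows.add(new Row(null, null));
        rows.add(new Row(2, "B"));
        // ORDER BY a asc nulls last, b desc
        rows.sort(Comparator
            .comparing((Row r) -> r.a, Comparator.nullsLast(Comparator.naturalOrder()))
            .thenComparing(r -> r.b, Comparator.nullsLast(Comparator.reverseOrder())));
        System.out.println(rows); // [(2,B), (2,A), (null,null)]
      }
    }
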
serialization.format 1 PREHOOK: query: -- Test changing the sort columns @@ -514,7 +514,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:0)] +Sort Columns: [Order(col:value, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src @@ -563,7 +563,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:0)] +Sort Columns: [Order(col:value, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test changing the sort order @@ -608,7 +608,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src @@ -657,7 +657,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test a sorted partition gets converted to unsorted diff --git ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out index cab3de4..af2f47a 100644 --- ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out +++ ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out @@ -408,7 +408,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 12 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test changing sort order @@ -455,7 +455,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 12 Bucket Columns: [key] -Sort Columns: [Order(col:value, order:0)] +Sort Columns: [Order(col:value, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test removing test order diff --git ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out index 184d2e4..46d5b34 100644 --- ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out +++ ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out @@ -110,7 +110,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [b] -Sort Columns: [Order(col:b, order:0)] +Sort Columns: [Order(col:b, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Turn off clustering for a partition @@ -200,7 +200,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [a, b] -Sort Columns: [Order(col:a, order:0), Order(col:b, order:1)] +Sort Columns: [Order(col:a, order:0, nullOrder:1), Order(col:b, order:1, nullOrder:0)] Storage Desc 
Params: serialization.format 1 PREHOOK: query: drop table alter_table_partition_clusterby_sortby diff --git ql/src/test/results/clientpositive/alter_table_not_sorted.q.out ql/src/test/results/clientpositive/alter_table_not_sorted.q.out index 6e1ec59..8a88bc4 100644 --- ql/src/test/results/clientpositive/alter_table_not_sorted.q.out +++ ql/src/test/results/clientpositive/alter_table_not_sorted.q.out @@ -34,7 +34,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [a] -Sort Columns: [Order(col:a, order:1)] +Sort Columns: [Order(col:a, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: alter table alter_table_not_sorted not sorted diff --git ql/src/test/results/clientpositive/authorization_index.q.out ql/src/test/results/clientpositive/authorization_index.q.out index adc02ad..a6ec998 100644 --- ql/src/test/results/clientpositive/authorization_index.q.out +++ ql/src/test/results/clientpositive/authorization_index.q.out @@ -41,7 +41,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat Compressed: No Num Buckets: -1 Bucket Columns: [] -Sort Columns: [Order(col:a, order:1)] +Sort Columns: [Order(col:a, order:1, nullOrder:0)] PREHOOK: query: alter index t1_index on t1 rebuild PREHOOK: type: ALTERINDEX_REBUILD PREHOOK: Input: default@t1 diff --git ql/src/test/results/clientpositive/bucket5.q.out ql/src/test/results/clientpositive/bucket5.q.out index 46e3d54..5caf5c3 100644 --- ql/src/test/results/clientpositive/bucket5.q.out +++ ql/src/test/results/clientpositive/bucket5.q.out @@ -533,7 +533,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 2 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: SELECT * FROM bucketed_table TABLESAMPLE (BUCKET 1 OUT OF 2) s LIMIT 10 diff --git ql/src/test/results/clientpositive/bucketmapjoin7.q.out ql/src/test/results/clientpositive/bucketmapjoin7.q.out index 1b9eb47..d6e2e4f 100644 --- ql/src/test/results/clientpositive/bucketmapjoin7.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin7.q.out @@ -126,15 +126,17 @@ TOK_QUERY value TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - b - value + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + b + value TOK_LIMIT 1 diff --git ql/src/test/results/clientpositive/correlationoptimizer12.q.out ql/src/test/results/clientpositive/correlationoptimizer12.q.out index 8b8952d..237368a 100644 --- ql/src/test/results/clientpositive/correlationoptimizer12.q.out +++ ql/src/test/results/clientpositive/correlationoptimizer12.q.out @@ -47,7 +47,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col0 + order by: _col0 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -132,7 +132,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col0 + order by: _col0 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/ctas.q.out ql/src/test/results/clientpositive/ctas.q.out index bf6db75..c03d82b 100644 --- ql/src/test/results/clientpositive/ctas.q.out +++ ql/src/test/results/clientpositive/ctas.q.out @@ -718,11 +718,13 @@ TOK_CREATETABLE value TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/ctas_colname.q.out ql/src/test/results/clientpositive/ctas_colname.q.out index 435b9e0..7c86c66 100644 --- ql/src/test/results/clientpositive/ctas_colname.q.out +++ ql/src/test/results/clientpositive/ctas_colname.q.out @@ -193,7 +193,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -354,7 +354,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out index 8813802..5f078f4 100644 --- ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out +++ ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out @@ -1155,7 +1155,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__") @@ -1196,7 +1196,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select count(*) from over1k_part_orc @@ -2129,7 +2129,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2170,7 +2170,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] 
+Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc @@ -2339,7 +2339,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2380,7 +2380,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc diff --git ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out index 2105d8b..9222d87 100644 --- ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out +++ ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out @@ -1060,7 +1060,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__") @@ -1101,7 +1101,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select count(*) from over1k_part @@ -2027,7 +2027,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2068,7 +2068,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select * from over1k_part_buck_sort2 @@ -2170,7 +2170,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2211,7 +2211,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select * from over1k_part_buck_sort2 diff --git ql/src/test/results/clientpositive/explain_logical.q.out ql/src/test/results/clientpositive/explain_logical.q.out index 59bd9e9..bf35cd5 100644 --- ql/src/test/results/clientpositive/explain_logical.q.out +++ 
ql/src/test/results/clientpositive/explain_logical.q.out @@ -793,10 +793,11 @@ TOK_QUERY value TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - s1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + s1 + key LOGICAL PLAN: diff --git ql/src/test/results/clientpositive/groupby_grouping_window.q.out ql/src/test/results/clientpositive/groupby_grouping_window.q.out index 0324836..5cd9737 100644 --- ql/src/test/results/clientpositive/groupby_grouping_window.q.out +++ ql/src/test/results/clientpositive/groupby_grouping_window.q.out @@ -101,7 +101,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3 + order by: _col3 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/groupby_resolution.q.out ql/src/test/results/clientpositive/groupby_resolution.q.out index 83fc576..76b5238 100644 --- ql/src/test/results/clientpositive/groupby_resolution.q.out +++ ql/src/test/results/clientpositive/groupby_resolution.q.out @@ -681,7 +681,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: 0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/index_skewtable.q.out ql/src/test/results/clientpositive/index_skewtable.q.out index 972789d..45e6834 100644 --- ql/src/test/results/clientpositive/index_skewtable.q.out +++ ql/src/test/results/clientpositive/index_skewtable.q.out @@ -57,7 +57,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat Compressed: No Num Buckets: -1 Bucket Columns: [] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] PREHOOK: query: ALTER INDEX kv_index ON kv REBUILD PREHOOK: type: ALTERINDEX_REBUILD PREHOOK: Input: default@kv diff --git ql/src/test/results/clientpositive/infer_bucket_sort.q.out ql/src/test/results/clientpositive/infer_bucket_sort.q.out index 182dd3a..709ca10 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort.q.out @@ -62,7 +62,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by where a key isn't selected, should not be bucketed or sorted @@ -168,7 +168,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys @@ -221,7 +221,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join with two keys and only one selected, should not be bucketed or sorted @@ -327,7 +327,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] 
+Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key @@ -380,7 +380,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test distribute by, should only be bucketed by key @@ -486,7 +486,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: -1 Bucket Columns: [] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test sort by desc, should be sorted by key @@ -539,7 +539,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: -1 Bucket Columns: [] -Sort Columns: [Order(col:key, order:0)] +Sort Columns: [Order(col:key, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test cluster by, should be bucketed and sorted by key @@ -592,7 +592,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other @@ -645,7 +645,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join in simple subquery, should be bucketed and sorted on key @@ -698,7 +698,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join in simple subquery renaming key column, should be bucketed and sorted on key @@ -751,7 +751,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in simple subquery, should be bucketed and sorted on key @@ -804,7 +804,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in simple subquery renaming key column, should be bucketed and sorted on key @@ -857,7 +857,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery with where outside, should still 
be bucketed and sorted on key @@ -910,7 +910,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery with expression on value, should still be bucketed and sorted on key @@ -963,7 +963,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery with lateral view outside, should still be bucketed and sorted @@ -1016,7 +1016,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the @@ -1071,7 +1071,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery with select on outside reordering the columns, should be bucketed and @@ -1126,7 +1126,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key @@ -1179,7 +1179,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery followed by sort by, should only be sorted by the sort key @@ -1232,7 +1232,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery followed by transform script, should not be bucketed or sorted @@ -1338,6 +1338,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out index 33d795b..6db9428 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out @@ -64,7 +64,7 @@ 
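Taken together, the DESCRIBE FORMATTED updates in these golden files show the new nullOrder values tracking Hive's defaults: ascending sort columns (order:1) are stored with nullOrder:0 (nulls first), while descending ones (order:0) are stored with nullOrder:1 (nulls last). A hedged HiveQL sketch of how this surfaces (table and column names are illustrative, not from the patch):

    CREATE TABLE t_sorted (key INT, value STRING)
    CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS;
    DESC FORMATTED t_sorted;
    -- Sort Columns now reads: [Order(col:key, order:0, nullOrder:1)]
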
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 3 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is diff --git ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out index 2f7e538..e5626a4 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out @@ -123,6 +123,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out index 7e3b48f..5f983c7 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out @@ -302,7 +302,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12') @@ -342,7 +342,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: CREATE TABLE srcpart_merge_dp LIKE srcpart diff --git ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out index ebfce60..90a71ae 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out @@ -144,7 +144,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test rollup, should be bucketed and sorted on key, value, grouping_key @@ -203,7 +203,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value, grouping_key] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test cube, should not be bucketed or sorted because its missing the grouping ID @@ -328,7 +328,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value] -Sort Columns: [Order(col:key, 
order:1), Order(col:value, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test cube, should be bucketed and sorted on key, value, grouping_key @@ -387,7 +387,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value, grouping_key] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test grouping sets, should not be bucketed or sorted because its missing the grouping ID @@ -512,7 +512,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test grouping sets, should be bucketed and sorted on key, value, grouping_key @@ -571,6 +571,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key, value, grouping_key] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out index 4ca6517..5ebcf41 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out @@ -361,7 +361,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test SMB join doesn't affect inference, should not be bucketed or sorted @@ -640,6 +640,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out index 53407c5..97cb92d 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out @@ -117,6 +117,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 2 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out index d4c22f4..e59026e 100644 --- 
ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out @@ -172,7 +172,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2') @@ -211,7 +211,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- The first partition should be bucketed and sorted, the second should not @@ -272,7 +272,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2') @@ -372,7 +372,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2') @@ -411,6 +411,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out index 1e4db29..91466f1 100644 --- ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out +++ ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out @@ -64,7 +64,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join, should be bucketed and sorted by join key @@ -117,7 +117,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys @@ -170,7 +170,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [key, value] -Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key @@ -223,7 +223,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket 
Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key @@ -276,7 +276,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 16 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the @@ -331,6 +331,6 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 diff --git ql/src/test/results/clientpositive/input_part7.q.out ql/src/test/results/clientpositive/input_part7.q.out index a0cf9af..0ae70f3 100644 --- ql/src/test/results/clientpositive/input_part7.q.out +++ ql/src/test/results/clientpositive/input_part7.q.out @@ -88,25 +88,29 @@ TOK_QUERY TOK_ALLCOLREF TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - ds + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + ds TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - hr + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + hr STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/lineage2.q.out ql/src/test/results/clientpositive/lineage2.q.out index be0b564..8912237 100644 --- ql/src/test/results/clientpositive/lineage2.q.out +++ ql/src/test/results/clientpositive/lineage2.q.out @@ -634,7 +634,7 @@ having count(a.c2) > 0 PREHOOK: type: QUERY PREHOOK: Input: default@dest_l2 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) id)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. 
(tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. (tok_table_or_col $hdt$_0) id))))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"}]} 1 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3 from dest_l2 a join dest_l3 b on (a.id = b.id) diff --git ql/src/test/results/clientpositive/lineage3.q.out ql/src/test/results/clientpositive/lineage3.q.out index ca7d6e0..451f4f5 100644 --- ql/src/test/results/clientpositive/lineage3.q.out +++ ql/src/test/results/clientpositive/lineage3.q.out @@ -67,7 +67,7 @@ where cint > 10 and cint < 10000 limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. 
(tok_table_or_col alltypesorc) cint))))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]} 762 1 762 1 762 1 @@ -86,7 +86,7 @@ order by a.ctinyint, a.cint PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"40c3faa7abd1cdb7f12c1047a8a1d2ce","queryText":"select a.ctinyint, a.cint, count(a.cdouble)\n over(partition by a.ctinyint order by a.cint desc\n rows between 1 preceding and 1 following)\nfrom alltypesorc a inner join alltypesorc b on a.cint = b.cbigint\norder by a.ctinyint, a.cint","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[3,4,5,6],"targets":[2],"expression":"(tok_function count (. (tok_table_or_col $hdt$_0) cdouble) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) ctinyint)) (tok_orderby (tok_tabsortcolnamedesc (. (tok_table_or_col $hdt$_0) cint)))) (tok_windowrange (preceding 1) (following 1))))","edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2],"expression":"a.cint is not null","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1,2],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"},{"sources":[6],"targets":[0,1,2],"expression":"a.cbigint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"40c3faa7abd1cdb7f12c1047a8a1d2ce","queryText":"select a.ctinyint, a.cint, count(a.cdouble)\n over(partition by a.ctinyint order by a.cint desc\n rows between 1 preceding and 1 following)\nfrom alltypesorc a inner join alltypesorc b on a.cint = b.cbigint\norder by a.ctinyint, a.cint","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[3,4,5,6],"targets":[2],"expression":"(tok_function count (. 
(tok_table_or_col $hdt$_0) cdouble) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) ctinyint)) (tok_orderby (tok_tabsortcolnamedesc (tok_nulls_last (. (tok_table_or_col $hdt$_0) cint))))) (tok_windowrange (preceding 1) (following 1))))","edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2],"expression":"a.cint is not null","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1,2],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"},{"sources":[6],"targets":[0,1,2],"expression":"a.cbigint is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]} PREHOOK: query: with v2 as (select cdouble, count(cint) over() a, sum(cint + cbigint) over(partition by cboolean1) b @@ -97,7 +97,7 @@ order by cdouble, a, b limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"a083a5322b6a83af6f614f299d0361e4","queryText":"with v2 as\n (select cdouble, count(cint) over() a,\n sum(cint + cbigint) over(partition by cboolean1) b\n from (select * from alltypesorc) v1)\nselect cdouble, a, b, a + b, cdouble + a from v2\nwhere cdouble is not null\norder by cdouble, a, b limit 5","edges":[{"sources":[5],"targets":[0],"edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[1],"expression":"(tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[2],"expression":"(tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[3],"expression":"((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647)))) + (tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cboolean1)))) (tok_windowrange (preceding 2147483647) (following 2147483647)))))","edgeType":"PROJECTION"},{"sources":[5,6,7,8,9,10,11,12,13,14,15,16],"targets":[4],"expression":"(alltypesorc.cdouble + UDFToDouble((tok_function count (. 
(tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc 0))) (tok_windowrange (preceding 2147483647) (following 2147483647))))))","edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3,4],"expression":"alltypesorc.cdouble is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cdouble"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"b"},{"id":3,"vertexType":"COLUMN","vertexId":"c3"},{"id":4,"vertexType":"COLUMN","vertexId":"c4"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":14,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":15,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":16,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"a083a5322b6a83af6f614f299d0361e4","queryText":"with v2 as\n (select cdouble, count(cint) over() a,\n sum(cint + cbigint) over(partition by cboolean1) b\n from (select * from alltypesorc) v1)\nselect cdouble, a, b, a + b, cdouble + a from v2\nwhere cdouble is not null\norder by cdouble, a, b limit 5","edges":[{"sources":[5],"targets":[0],"edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[1],"expression":"(tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first 0)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[2],"expression":"(tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. (tok_table_or_col alltypesorc) cboolean1))))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[6,7,8,9,10,5,11,12,13,14,15,16],"targets":[3],"expression":"((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first 0)))) (tok_windowrange (preceding 2147483647) (following 2147483647)))) + (tok_function sum (+ (tok_function tok_bigint (. (tok_table_or_col alltypesorc) cint)) (. (tok_table_or_col alltypesorc) cbigint)) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col alltypesorc) cboolean1)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. 
(tok_table_or_col alltypesorc) cboolean1))))) (tok_windowrange (preceding 2147483647) (following 2147483647)))))","edgeType":"PROJECTION"},{"sources":[5,6,7,8,9,10,11,12,13,14,15,16],"targets":[4],"expression":"(alltypesorc.cdouble + UDFToDouble((tok_function count (. (tok_table_or_col alltypesorc) cint) (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first 0)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))))","edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3,4],"expression":"alltypesorc.cdouble is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cdouble"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"b"},{"id":3,"vertexType":"COLUMN","vertexId":"c3"},{"id":4,"vertexType":"COLUMN","vertexId":"c4"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":14,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":15,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":16,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"}]} -16379.0 9173 -919551973060 -919551963887 -7206.0 -16373.0 9173 -919551973060 -919551963887 -7200.0 -16372.0 9173 -919551973060 -919551963887 -7199.0 @@ -311,13 +311,13 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@dest_v3 -{"version":"1.0","engine":"mr","database":"default","hash":"949093880975cc807ad1a8003e8a8c7c","queryText":"alter view dest_v3 as\n select * from (\n select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n count(b.cstring1) x, b.cboolean1\n from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n where a.cboolean2 = true and b.cfloat > 0\n group by a.ctinyint, a.csmallint, b.cboolean1\n having count(a.cint) > 10\n order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. 
(tok_table_or_col a) csmallint))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"949093880975cc807ad1a8003e8a8c7c","queryText":"alter view dest_v3 as\n select * from (\n select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,\n count(b.cstring1) x, b.cboolean1\n from alltypesorc a join alltypesorc b on (a.cint = b.cint)\n where a.cboolean2 = true and b.cfloat > 0\n group by a.ctinyint, a.csmallint, b.cboolean1\n having count(a.cint) > 10\n order by a, x, b.cboolean1 limit 10) t","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col a) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col a) csmallint)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. 
(tok_table_or_col a) csmallint)))))))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,2],"expression":"((a.cboolean2 = true) and (b.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} PREHOOK: query: select * from dest_v3 limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@dest_v3 #### A masked pattern was here #### -{"version":"1.0","engine":"mr","database":"default","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7,8],"targets":[0,1,2],"expression":"(a.cint is not null and (a.cboolean2 = true))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[7,9],"targets":[0,1,2],"expression":"(a.cint is not null and (a.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} +{"version":"1.0","engine":"mr","database":"default","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. 
(tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (tok_nulls_first (. (tok_table_or_col $hdt$_0) csmallint))))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7,8],"targets":[0,1,2],"expression":"(a.cint is not null and (a.cboolean2 = true))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[7,9],"targets":[0,1,2],"expression":"(a.cint is not null and (a.cfloat > 0.0))","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]} 38 216 false 38 229 true PREHOOK: query: drop table if exists src_dp diff --git ql/src/test/results/clientpositive/orc_analyze.q.out ql/src/test/results/clientpositive/orc_analyze.q.out index 87855fa..9b7e7b7 100644 --- ql/src/test/results/clientpositive/orc_analyze.q.out +++ ql/src/test/results/clientpositive/orc_analyze.q.out @@ -917,7 +917,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -960,7 +960,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan @@ -1015,7 +1015,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -1058,7 +1058,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan @@ -1113,7 +1113,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: 
[Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -1156,7 +1156,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: drop table orc_create_people @@ -1262,7 +1262,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -1305,7 +1305,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: drop table orc_create_people diff --git ql/src/test/results/clientpositive/order_null.q.out ql/src/test/results/clientpositive/order_null.q.out new file mode 100644 index 0000000..73ed5d8 --- /dev/null +++ ql/src/test/results/clientpositive/order_null.q.out @@ -0,0 +1,222 @@ +PREHOOK: query: create table src_null (a int, b string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_null +POSTHOOK: query: create table src_null (a int, b string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_null +PREHOOK: query: insert into src_null values (1, 'A') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (1, 'A') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (null, null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (null, null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (2, 'B') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (2, 'B') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE 
[(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (2, 'A') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (2, 'A') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (2, null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__5 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (2, null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__5 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (3, null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__6 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (3, null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__6 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +1 A +2 NULL +2 A +2 B +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +3 NULL +2 NULL +2 A +2 B +1 A +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +3 NULL +2 NULL +NULL NULL +2 A +1 A +2 B +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 B +2 A +1 A +3 NULL +2 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls first +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x 
ORDER BY a asc nulls first +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +1 A +2 NULL +2 A +2 B +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc nulls first +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc nulls first +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +3 NULL +2 NULL +2 A +2 B +1 A +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 A +1 A +2 B +3 NULL +2 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 B +2 A +1 A +3 NULL +2 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +1 A +2 B +2 A +2 NULL +3 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 B +2 A +1 A +3 NULL +2 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 NULL +3 NULL +NULL NULL +1 A +2 A +2 B diff --git ql/src/test/results/clientpositive/partition_coltype_literals.q.out ql/src/test/results/clientpositive/partition_coltype_literals.q.out index bc159eb..1a93b7a 100644 --- ql/src/test/results/clientpositive/partition_coltype_literals.q.out +++ ql/src/test/results/clientpositive/partition_coltype_literals.q.out @@ -154,7 +154,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:0)] +Sort Columns: [Order(col:key, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- rename partition @@ -205,7 +205,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:0)] +Sort Columns: [Order(col:key, order:0, nullOrder:1)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- insert partition diff --git ql/src/test/results/clientpositive/pcr.q.out ql/src/test/results/clientpositive/pcr.q.out index 89c5c22..b0183f1 
100644 --- ql/src/test/results/clientpositive/pcr.q.out +++ ql/src/test/results/clientpositive/pcr.q.out @@ -85,11 +85,13 @@ TOK_QUERY 5 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -306,8 +308,9 @@ TOK_QUERY 5 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key STAGE DEPENDENCIES: @@ -615,11 +618,13 @@ TOK_QUERY 'val_2' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -851,11 +856,13 @@ TOK_QUERY 'val_5' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1089,11 +1096,13 @@ TOK_QUERY 'val_5' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1383,11 +1392,13 @@ TOK_QUERY 'val_5' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1677,11 +1688,13 @@ TOK_QUERY 14 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -1887,11 +1900,13 @@ TOK_QUERY '2000-04-09' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2133,11 +2148,13 @@ TOK_QUERY '2000-04-10' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2460,14 +2477,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -2689,10 +2709,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -2991,10 +3012,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -3354,14 +3376,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -3718,14 +3743,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -5268,8 +5296,9 @@ TOK_QUERY 11 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_LIMIT 10 @@ -5446,14 +5475,17 @@ TOK_QUERY 11 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - hr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + hr STAGE DEPENDENCIES: @@ -5670,14 +5702,17 @@ TOK_QUERY 11 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - hr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + hr STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/pcs.q.out ql/src/test/results/clientpositive/pcs.q.out index 51a21dd..113b091 100644 --- ql/src/test/results/clientpositive/pcs.q.out +++ ql/src/test/results/clientpositive/pcs.q.out @@ -125,14 +125,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1198,7 +1201,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col0 + order by: _col0 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -1356,20 +1359,23 @@ TOK_QUERY '2008-04-08' TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - ds + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + A + ds STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/perf/query12.q.out ql/src/test/results/clientpositive/perf/query12.q.out index f326a92..986a307 100644 --- ql/src/test/results/clientpositive/perf/query12.q.out +++ ql/src/test/results/clientpositive/perf/query12.q.out @@ -24,7 +24,7 @@ Stage-0 Select Operator [SEL_21] (rows=34938 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] PTF Operator [PTF_20] (rows=34938 width=1436) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3","partition by:":"_col3"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3 ASC NULLS FIRST","partition by:":"_col3"}] Select Operator [SEL_19] (rows=34938 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 4 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/perf/query20.q.out ql/src/test/results/clientpositive/perf/query20.q.out index 74d286a..9af7729 100644 --- ql/src/test/results/clientpositive/perf/query20.q.out +++ ql/src/test/results/clientpositive/perf/query20.q.out @@ -26,7 +26,7 @@ Stage-0 Select Operator [SEL_23] (rows=139755 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] PTF Operator [PTF_22] (rows=139755 width=1436) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3","partition by:":"_col3"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3 ASC NULLS FIRST","partition by:":"_col3"}] Select Operator [SEL_21] (rows=139755 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 4 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/perf/query51.q.out ql/src/test/results/clientpositive/perf/query51.q.out index f568ead..08546c5 100644 --- ql/src/test/results/clientpositive/perf/query51.q.out +++ ql/src/test/results/clientpositive/perf/query51.q.out @@ -112,7 +112,7 @@ Stage-0 Filter Operator [FIL_58] (rows=7365 width=1119) predicate:(max_window_0 > max_window_1) PTF Operator [PTF_45] (rows=22096 width=1119) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"CASE WHEN (_col1 is not null) THEN (_col1) ELSE (_col4) END","partition by:":"CASE WHEN (_col0 is not null) THEN (_col0) ELSE (_col3) END"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"CASE WHEN (_col1 is not null) THEN (_col1) ELSE (_col4) END ASC NULLS FIRST","partition by:":"CASE WHEN (_col0 is not null) THEN (_col0) ELSE (_col3) END"}] Select Operator [SEL_44] (rows=22096 width=1119) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 4 [SIMPLE_EDGE] @@ -126,7 +126,7 @@ Stage-0 Select Operator [SEL_37] (rows=20088 width=1119) Output:["_col0","_col1","_col2"] PTF Operator [PTF_36] (rows=20088 width=1119) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col0"}] Group By Operator [GBY_32] (rows=20088 width=1119) Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 9 [SIMPLE_EDGE] @@ -162,7 +162,7 @@ Stage-0 Select Operator [SEL_17] (rows=20088 width=1119) Output:["_col0","_col1","_col2"] PTF Operator [PTF_16] (rows=20088 width=1119) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}] + Function 
definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col0"}] Group By Operator [GBY_12] (rows=20088 width=1119) Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1 <-Reducer 2 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/perf/query67.q.out ql/src/test/results/clientpositive/perf/query67.q.out index f158829..83dab38 100644 --- ql/src/test/results/clientpositive/perf/query67.q.out +++ ql/src/test/results/clientpositive/perf/query67.q.out @@ -111,7 +111,7 @@ Stage-0 Filter Operator [FIL_47] (rows=762300 width=1436) predicate:(rank_window_0 <= 100) PTF Operator [PTF_29] (rows=2286900 width=1436) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col16(DESC)","partition by:":"_col0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col16 DESC NULLS LAST","partition by:":"_col0"}] Select Operator [SEL_28] (rows=2286900 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col16"] <-Reducer 5 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/perf/query70.q.out ql/src/test/results/clientpositive/perf/query70.q.out index 9b58fdb..ecf3563 100644 --- ql/src/test/results/clientpositive/perf/query70.q.out +++ ql/src/test/results/clientpositive/perf/query70.q.out @@ -31,7 +31,7 @@ Stage-0 Select Operator [SEL_57] (rows=66289 width=1119) Output:["_col0","_col1","_col2","_col3","_col4"] PTF Operator [PTF_56] (rows=66289 width=1119) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col4","partition by:":"_col5, CASE WHEN ((_col5 = 2)) THEN (_col0) END"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col4 ASC NULLS FIRST","partition by:":"_col5, CASE WHEN ((_col5 = 2)) THEN (_col0) END"}] Select Operator [SEL_55] (rows=66289 width=1119) Output:["_col0","_col1","_col4","_col5"] <-Reducer 4 [SIMPLE_EDGE] @@ -97,7 +97,7 @@ Stage-0 Filter Operator [FIL_83] (rows=6696 width=1119) predicate:((rank_window_0 <= 5) and _col0 is not null) PTF Operator [PTF_31] (rows=20088 width=1119) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1(DESC)","partition by:":"_col0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 DESC NULLS LAST","partition by:":"_col0"}] Select Operator [SEL_30] (rows=20088 width=1119) Output:["_col0","_col1"] <-Reducer 13 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/perf/query89.q.out ql/src/test/results/clientpositive/perf/query89.q.out index 4d0fb37..f8e6224 100644 --- ql/src/test/results/clientpositive/perf/query89.q.out +++ ql/src/test/results/clientpositive/perf/query89.q.out @@ -81,7 +81,7 @@ Stage-0 Select Operator [SEL_29] (rows=153730 width=1436) Output:["avg_window_0","_col0","_col1","_col2","_col3","_col4","_col5","_col6"] PTF Operator [PTF_28] (rows=153730 width=1436) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col0, _col2, _col3, _col4","partition by:":"_col0, _col2, _col3, _col4"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col0 ASC NULLS FIRST, _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST","partition by:":"_col0, _col2, _col3, _col4"}] Select Operator [SEL_27] (rows=153730 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] <-Reducer 5 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/perf/query98.q.out 
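Every perf plan in this run changes the same way: the windowing table function now prints an explicit direction and null order for each sort key, so the old shorthand "_col16(DESC)" becomes "_col16 DESC NULLS LAST" and a bare "_col3" becomes "_col3 ASC NULLS FIRST". A hedged sketch of a query shape that yields such a plan; the TPC-DS-style names and the query text are assumptions, not taken from these files:

EXPLAIN
SELECT rank() OVER (PARTITION BY ss_store_sk
                    ORDER BY ss_net_profit DESC) AS rnk
FROM store_sales;
-- the PTF operator in the plan is now expected to print the sort key as
-- "<key> DESC NULLS LAST" rather than "<key>(DESC)"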
ql/src/test/results/clientpositive/perf/query98.q.out index f51744d..fc4b7a3 100644 --- ql/src/test/results/clientpositive/perf/query98.q.out +++ ql/src/test/results/clientpositive/perf/query98.q.out @@ -24,7 +24,7 @@ Stage-0 Select Operator [SEL_23] (rows=139755 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] PTF Operator [PTF_22] (rows=139755 width=1436) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3","partition by:":"_col3"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3 ASC NULLS FIRST","partition by:":"_col3"}] Select Operator [SEL_21] (rows=139755 width=1436) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Reducer 4 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/pointlookup2.q.out ql/src/test/results/clientpositive/pointlookup2.q.out index 5192e1a..c0b9357 100644 --- ql/src/test/results/clientpositive/pointlookup2.q.out +++ ql/src/test/results/clientpositive/pointlookup2.q.out @@ -141,14 +141,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -365,10 +368,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -627,10 +631,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -941,20 +946,23 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t2 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t2 + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t2 - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t2 + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - ds + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + ds STAGE DEPENDENCIES: @@ -1310,20 +1318,23 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t2 - ds + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + t2 + ds STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/pointlookup3.q.out ql/src/test/results/clientpositive/pointlookup3.q.out index 791be61..7d11884 100644 --- ql/src/test/results/clientpositive/pointlookup3.q.out +++ ql/src/test/results/clientpositive/pointlookup3.q.out @@ -98,17 +98,21 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds1 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds1 TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds2 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds2 STAGE DEPENDENCIES: @@ -338,17 +342,21 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds1 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds1 TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds2 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds2 STAGE DEPENDENCIES: @@ -520,10 +528,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -783,10 +792,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -1099,20 +1109,23 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t2 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t2 + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t2 - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t2 + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - ds1 + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + t1 + ds1 STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/pointlookup4.q.out ql/src/test/results/clientpositive/pointlookup4.q.out index 948ea7a..182f9dd 100644 --- ql/src/test/results/clientpositive/pointlookup4.q.out +++ ql/src/test/results/clientpositive/pointlookup4.q.out @@ -108,17 +108,21 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds1 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds1 TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds2 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds2 STAGE DEPENDENCIES: @@ -348,17 +352,21 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds1 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds1 TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds2 + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds2 STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/ppd_vc.q.out ql/src/test/results/clientpositive/ppd_vc.q.out index 69a9bc6..ac35641 100644 --- ql/src/test/results/clientpositive/ppd_vc.q.out +++ ql/src/test/results/clientpositive/ppd_vc.q.out @@ -390,14 +390,17 @@ TOK_QUERY b TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - hr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + hr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - BLOCK__OFFSET__INSIDE__FILE + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + BLOCK__OFFSET__INSIDE__FILE STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/ptf.q.out ql/src/test/results/clientpositive/ptf.q.out index c8d37df..cdac02a 100644 --- ql/src/test/results/clientpositive/ptf.q.out +++ ql/src/test/results/clientpositive/ptf.q.out @@ -56,7 +56,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -92,7 +92,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -272,7 +272,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -308,7 +308,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -427,7 +427,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -544,7 +544,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -580,7 +580,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: 
_col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -725,7 +725,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -761,7 +761,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -909,7 +909,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -954,7 +954,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -1095,7 +1095,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -1246,7 +1246,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -1390,7 +1390,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -1416,7 +1416,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1452,7 +1452,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -1565,7 +1565,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -1592,7 +1592,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1629,7 +1629,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1770,7 +1770,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1806,7 +1806,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order 
by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1950,7 +1950,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1964,7 +1964,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1972,7 +1972,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2009,7 +2009,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2017,7 +2017,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2053,7 +2053,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2204,7 +2204,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2240,7 +2240,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2388,7 +2388,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2464,7 +2464,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2620,7 +2620,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2789,7 +2789,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col0 + order by: _col0 ASC NULLS FIRST output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: @@ -2825,7 +2825,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -3005,7 +3005,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output 
shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3047,7 +3047,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3122,7 +3122,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3168,7 +3168,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3, _col2 + order by: _col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST partition by: _col3 raw input shape: window functions: @@ -3413,14 +3413,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3434,7 +3434,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3442,7 +3442,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3479,7 +3479,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3487,7 +3487,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3523,7 +3523,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -3698,14 +3698,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3741,7 +3741,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3777,7 +3777,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3813,7 +3813,7 @@ 
STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3983,14 +3983,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4026,14 +4026,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4069,7 +4069,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4240,14 +4240,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4283,7 +4283,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4297,7 +4297,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4335,7 +4335,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4372,7 +4372,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -4544,7 +4544,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4558,7 +4558,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4566,7 +4566,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + 
order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4603,7 +4603,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4611,7 +4611,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4647,7 +4647,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -4813,14 +4813,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4834,7 +4834,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4872,7 +4872,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4909,7 +4909,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/ptf_matchpath.q.out ql/src/test/results/clientpositive/ptf_matchpath.q.out index 042982f..beb4e28 100644 --- ql/src/test/results/clientpositive/ptf_matchpath.q.out +++ ql/src/test/results/clientpositive/ptf_matchpath.q.out @@ -94,7 +94,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col2, _col3, _col4 + order by: _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: _col6 raw input shape: @@ -214,7 +214,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col6, _col2, _col3, _col4 + order by: _col6 ASC NULLS FIRST, _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: 0 raw input shape: @@ -331,7 +331,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col6, _col2, _col3, _col4 + order by: _col6 ASC NULLS FIRST, _col2 ASC NULLS FIRST, 
_col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: 0 raw input shape: diff --git ql/src/test/results/clientpositive/ptf_streaming.q.out ql/src/test/results/clientpositive/ptf_streaming.q.out index 35c6d04..d1b49db 100644 --- ql/src/test/results/clientpositive/ptf_streaming.q.out +++ ql/src/test/results/clientpositive/ptf_streaming.q.out @@ -56,7 +56,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -92,7 +92,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -272,7 +272,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -308,7 +308,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -430,7 +430,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -570,7 +570,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -596,7 +596,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -632,7 +632,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -745,7 +745,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -772,7 +772,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -809,7 +809,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -953,7 +953,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -967,7 +967,7 @@ STAGE PLANS: Partition table 
definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -975,7 +975,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1012,7 +1012,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1020,7 +1020,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1056,7 +1056,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1202,7 +1202,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1216,7 +1216,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1224,7 +1224,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1261,7 +1261,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1269,7 +1269,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1305,7 +1305,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1451,7 +1451,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1465,7 +1465,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double 
partition by: _col2 raw input shape: @@ -1473,7 +1473,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1510,7 +1510,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1518,7 +1518,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1554,7 +1554,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1704,7 +1704,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1780,7 +1780,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1959,14 +1959,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1980,7 +1980,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1988,7 +1988,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2025,7 +2025,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2033,7 +2033,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2069,7 +2069,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -2244,14 +2244,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2 + order 
by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2287,7 +2287,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2323,7 +2323,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2359,7 +2359,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2527,14 +2527,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2548,7 +2548,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2586,7 +2586,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2623,7 +2623,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/ptfgroupbyjoin.q.out ql/src/test/results/clientpositive/ptfgroupbyjoin.q.out index 39d7a17..8476e1c 100644 --- ql/src/test/results/clientpositive/ptfgroupbyjoin.q.out +++ ql/src/test/results/clientpositive/ptfgroupbyjoin.q.out @@ -242,7 +242,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col0, _col1 + order by: _col0 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col0, _col1 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/push_or.q.out ql/src/test/results/clientpositive/push_or.q.out index 83680e1..6dadcc0 100644 --- ql/src/test/results/clientpositive/push_or.q.out +++ ql/src/test/results/clientpositive/push_or.q.out @@ -67,11 +67,13 @@ TOK_QUERY 5 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/quotedid_basic.q.out ql/src/test/results/clientpositive/quotedid_basic.q.out index 052e4d0..9ca3f6a 100644 --- 
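The AST diffs in this stretch all follow one pattern: each TOK_TABSORTCOLNAMEASC node gains a TOK_NULLS_FIRST child that wraps the column reference, so the parser now records the formerly implicit default directly in the tree. A sketch of the correspondence in HiveQL comment form; only the ASC variant appears in these diffs, so the other two pairings are assumptions by symmetry:

-- ORDER BY key             => TOK_TABSORTCOLNAMEASC(TOK_NULLS_FIRST(TOK_TABLE_OR_COL key))
-- ORDER BY key NULLS LAST  => TOK_TABSORTCOLNAMEASC(TOK_NULLS_LAST(TOK_TABLE_OR_COL key))   (assumption)
-- ORDER BY key DESC        => TOK_TABSORTCOLNAMEDESC(TOK_NULLS_LAST(TOK_TABLE_OR_COL key))  (assumption)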
ql/src/test/results/clientpositive/quotedid_basic.q.out +++ ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -214,7 +214,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -322,7 +322,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/sample10.q.out ql/src/test/results/clientpositive/sample10.q.out index 8ccb577..71c4c8a 100644 --- ql/src/test/results/clientpositive/sample10.q.out +++ ql/src/test/results/clientpositive/sample10.q.out @@ -77,8 +77,9 @@ TOK_QUERY ds TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/sample6.q.out ql/src/test/results/clientpositive/sample6.q.out index 38e21de..c9c067d 100644 --- ql/src/test/results/clientpositive/sample6.q.out +++ ql/src/test/results/clientpositive/sample6.q.out @@ -624,11 +624,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -1023,11 +1025,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -1676,11 +1680,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2171,11 +2177,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2652,11 +2660,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2981,11 +2991,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -3187,11 +3199,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/semijoin2.q.out ql/src/test/results/clientpositive/semijoin2.q.out index 449dc3a..7e0f51b 100644 --- ql/src/test/results/clientpositive/semijoin2.q.out +++ ql/src/test/results/clientpositive/semijoin2.q.out @@ -154,7 +154,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: (_col98 + _col16), floor(_col21)(DESC) + order by: (_col98 + _col16) ASC NULLS FIRST, floor(_col21) DESC NULLS LAST partition by: (_col98 + _col16) raw input shape: window functions: @@ -201,7 +201,7 @@ STAGE PLANS: Windowing 
table definition input alias: ptf_1 name: windowingtablefunction - order by: (_col99 + _col17)(DESC), floor(_col22)(DESC) + order by: (_col99 + _col17) DESC NULLS LAST, floor(_col22) DESC NULLS LAST partition by: (_col99 + _col17) raw input shape: window functions: diff --git ql/src/test/results/clientpositive/semijoin4.q.out ql/src/test/results/clientpositive/semijoin4.q.out index 0da9eb3..1a6d8d7 100644 --- ql/src/test/results/clientpositive/semijoin4.q.out +++ ql/src/test/results/clientpositive/semijoin4.q.out @@ -172,7 +172,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: (UDFToShort(_col2) + _col0), floor(_col1)(DESC) + order by: (UDFToShort(_col2) + _col0) ASC NULLS FIRST, floor(_col1) DESC NULLS LAST partition by: (UDFToShort(_col2) + _col0) raw input shape: window functions: diff --git ql/src/test/results/clientpositive/show_create_table_alter.q.out ql/src/test/results/clientpositive/show_create_table_alter.q.out index 32819ea..78a34de 100644 --- ql/src/test/results/clientpositive/show_create_table_alter.q.out +++ ql/src/test/results/clientpositive/show_create_table_alter.q.out @@ -24,7 +24,7 @@ CREATE EXTERNAL TABLE `tmp_showcrt1`( CLUSTERED BY ( key) SORTED BY ( - value DESC) + value DESC NULLS LAST) INTO 5 BUCKETS ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' @@ -59,7 +59,7 @@ COMMENT 'temporary table' CLUSTERED BY ( key) SORTED BY ( - value DESC) + value DESC NULLS LAST) INTO 5 BUCKETS ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' @@ -98,7 +98,7 @@ COMMENT 'changed comment' CLUSTERED BY ( key) SORTED BY ( - value DESC) + value DESC NULLS LAST) INTO 5 BUCKETS ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' @@ -136,7 +136,7 @@ COMMENT 'changed comment' CLUSTERED BY ( key) SORTED BY ( - value DESC) + value DESC NULLS LAST) INTO 5 BUCKETS ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' @@ -174,7 +174,7 @@ COMMENT 'changed comment' CLUSTERED BY ( key) SORTED BY ( - value DESC) + value DESC NULLS LAST) INTO 5 BUCKETS ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' diff --git ql/src/test/results/clientpositive/smb_mapjoin_13.q.out ql/src/test/results/clientpositive/smb_mapjoin_13.q.out index fc12e35..20980b3 100644 --- ql/src/test/results/clientpositive/smb_mapjoin_13.q.out +++ ql/src/test/results/clientpositive/smb_mapjoin_13.q.out @@ -112,10 +112,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -310,10 +311,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/smb_mapjoin_15.q.out ql/src/test/results/clientpositive/smb_mapjoin_15.q.out index 6f6d80b..f84f474 100644 --- ql/src/test/results/clientpositive/smb_mapjoin_15.q.out +++ ql/src/test/results/clientpositive/smb_mapjoin_15.q.out @@ -84,10 +84,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -344,10 +345,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -552,10 +554,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . 
- TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -760,10 +763,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/spark/bucket5.q.out ql/src/test/results/clientpositive/spark/bucket5.q.out index 547b0cd..d346840 100644 --- ql/src/test/results/clientpositive/spark/bucket5.q.out +++ ql/src/test/results/clientpositive/spark/bucket5.q.out @@ -389,7 +389,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 2 Bucket Columns: [key] -Sort Columns: [Order(col:key, order:1)] +Sort Columns: [Order(col:key, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: SELECT * FROM bucketed_table TABLESAMPLE (BUCKET 1 OUT OF 2) s LIMIT 10 diff --git ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out index d754df2..5975bec 100644 --- ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out +++ ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out @@ -126,15 +126,17 @@ TOK_QUERY value TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - b - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + b + value TOK_LIMIT 1 diff --git ql/src/test/results/clientpositive/spark/ctas.q.out ql/src/test/results/clientpositive/spark/ctas.q.out index 175344c..884d515 100644 --- ql/src/test/results/clientpositive/spark/ctas.q.out +++ ql/src/test/results/clientpositive/spark/ctas.q.out @@ -702,11 +702,13 @@ TOK_CREATETABLE value TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/spark/groupby_resolution.q.out ql/src/test/results/clientpositive/spark/groupby_resolution.q.out index bea6479..fd6e423 100644 --- ql/src/test/results/clientpositive/spark/groupby_resolution.q.out +++ ql/src/test/results/clientpositive/spark/groupby_resolution.q.out @@ -675,7 +675,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: 0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out index 33d795b..6db9428 100644 --- ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out +++ ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out @@ -64,7 +64,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 3 Bucket Columns: [value] -Sort Columns: [Order(col:value, order:1)] +Sort Columns: [Order(col:value, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is diff --git ql/src/test/results/clientpositive/spark/pcr.q.out ql/src/test/results/clientpositive/spark/pcr.q.out index 681251d..f01628c 100644 --- ql/src/test/results/clientpositive/spark/pcr.q.out +++ ql/src/test/results/clientpositive/spark/pcr.q.out @@ -85,11 
+85,13 @@ TOK_QUERY 5 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -312,8 +314,9 @@ TOK_QUERY 5 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key STAGE DEPENDENCIES: @@ -627,11 +630,13 @@ TOK_QUERY 'val_2' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -869,11 +874,13 @@ TOK_QUERY 'val_5' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1113,11 +1120,13 @@ TOK_QUERY 'val_5' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1413,11 +1422,13 @@ TOK_QUERY 'val_5' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -1713,11 +1724,13 @@ TOK_QUERY 14 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -1929,11 +1942,13 @@ TOK_QUERY '2000-04-09' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2181,11 +2196,13 @@ TOK_QUERY '2000-04-10' TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2514,14 +2531,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -2749,10 +2769,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -3062,10 +3083,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - t1 - key + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + t1 + key STAGE DEPENDENCIES: @@ -3390,14 +3412,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -3760,14 +3785,17 @@ TOK_QUERY 2 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: @@ -4630,8 +4658,9 @@ TOK_QUERY 11 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_LIMIT 10 @@ -4814,14 +4843,17 @@ TOK_QUERY 11 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - hr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + hr STAGE DEPENDENCIES: @@ -5044,14 +5076,17 @@ TOK_QUERY 11 TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - hr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + hr STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/spark/ptf.q.out ql/src/test/results/clientpositive/spark/ptf.q.out index a826bc3..0640ca3 100644 --- ql/src/test/results/clientpositive/spark/ptf.q.out +++ ql/src/test/results/clientpositive/spark/ptf.q.out @@ -62,7 +62,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -88,7 +88,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -266,7 +266,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -292,7 +292,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -417,7 +417,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -540,7 +540,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -566,7 +566,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -717,7 +717,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST 
output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -743,7 +743,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -897,7 +897,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -932,7 +932,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -1092,7 +1092,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -1262,7 +1262,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -1369,7 +1369,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -1396,7 +1396,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1422,7 +1422,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -1540,7 +1540,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -1568,7 +1568,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1595,7 +1595,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1742,7 +1742,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1768,7 +1768,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1918,7 +1918,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2(DESC), _col1 
+ order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1932,7 +1932,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1940,7 +1940,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1967,7 +1967,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1975,7 +1975,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2001,7 +2001,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2158,7 +2158,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2184,7 +2184,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2351,7 +2351,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2396,7 +2396,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2558,7 +2558,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2723,7 +2723,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col0 + order by: _col0 ASC NULLS FIRST output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: @@ -2749,7 +2749,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -2936,7 +2936,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2988,7 +2988,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS 
FIRST partition by: _col2 raw input shape: window functions: @@ -3024,7 +3024,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3, _col2 + order by: _col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST partition by: _col3 raw input shape: window functions: @@ -3082,7 +3082,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE @@ -3106,7 +3106,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE @@ -3336,14 +3336,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3357,7 +3357,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3365,7 +3365,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3392,7 +3392,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3400,7 +3400,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3426,7 +3426,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -3607,14 +3607,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3640,7 +3640,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3666,7 +3666,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition 
by: _col2 raw input shape: @@ -3692,7 +3692,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3868,14 +3868,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3901,14 +3901,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3934,7 +3934,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4111,14 +4111,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4144,7 +4144,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4158,7 +4158,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4186,7 +4186,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4213,7 +4213,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -4391,7 +4391,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4405,7 +4405,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4413,7 +4413,7 @@ STAGE PLANS: Partition table definition 
input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4440,7 +4440,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4448,7 +4448,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4474,7 +4474,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -4646,14 +4646,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4667,7 +4667,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4695,7 +4695,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4722,7 +4722,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out index fc35091..1098240 100644 --- ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out +++ ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out @@ -100,7 +100,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col2, _col3, _col4 + order by: _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: _col6 raw input shape: @@ -226,7 +226,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col6, _col2, _col3, _col4 + order by: _col6 ASC NULLS FIRST, _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: 0 raw input shape: @@ -349,7 +349,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col6, 
_col2, _col3, _col4 + order by: _col6 ASC NULLS FIRST, _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: 0 raw input shape: diff --git ql/src/test/results/clientpositive/spark/ptf_streaming.q.out ql/src/test/results/clientpositive/spark/ptf_streaming.q.out index ecaa779..8e47b11 100644 --- ql/src/test/results/clientpositive/spark/ptf_streaming.q.out +++ ql/src/test/results/clientpositive/spark/ptf_streaming.q.out @@ -62,7 +62,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -88,7 +88,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -266,7 +266,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -292,7 +292,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -433,7 +433,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -557,7 +557,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -584,7 +584,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -610,7 +610,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -728,7 +728,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -756,7 +756,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -783,7 +783,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -933,7 +933,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: 
int, _col7: double partition by: _col2 raw input shape: @@ -947,7 +947,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -955,7 +955,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -982,7 +982,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -990,7 +990,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1016,7 +1016,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1168,7 +1168,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1182,7 +1182,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1190,7 +1190,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1217,7 +1217,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1225,7 +1225,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1251,7 +1251,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1403,7 +1403,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1417,7 +1417,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS 
FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1425,7 +1425,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1452,7 +1452,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1460,7 +1460,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1486,7 +1486,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1655,7 +1655,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1700,7 +1700,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1885,14 +1885,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1906,7 +1906,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1914,7 +1914,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1941,7 +1941,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1949,7 +1949,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1975,7 +1975,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -2156,14 +2156,14 @@ STAGE 
PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2189,7 +2189,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2215,7 +2215,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2241,7 +2241,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2415,14 +2415,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2436,7 +2436,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2464,7 +2464,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2491,7 +2491,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/spark/sample10.q.out ql/src/test/results/clientpositive/spark/sample10.q.out index 372545a..04cff3a 100644 --- ql/src/test/results/clientpositive/spark/sample10.q.out +++ ql/src/test/results/clientpositive/spark/sample10.q.out @@ -77,8 +77,9 @@ TOK_QUERY ds TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - ds + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + ds STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/spark/sample6.q.out ql/src/test/results/clientpositive/spark/sample6.q.out index 70537de..9b07814 100644 --- ql/src/test/results/clientpositive/spark/sample6.q.out +++ ql/src/test/results/clientpositive/spark/sample6.q.out @@ -469,11 +469,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -874,11 +876,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key 
TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -1533,11 +1537,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2034,11 +2040,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2521,11 +2529,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -2856,11 +2866,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: @@ -3068,11 +3080,13 @@ TOK_QUERY s TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out index 41379b6..fe45367 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out @@ -112,10 +112,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -322,10 +323,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out index 3779fb6..1205b2d 100644 --- ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out +++ ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out @@ -84,10 +84,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -356,10 +357,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -576,10 +578,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 @@ -796,10 +799,11 @@ TOK_QUERY TOK_ALLCOLREF TOK_ORDERBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - a - key + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + a + key TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/spark/subquery_in.q.out ql/src/test/results/clientpositive/spark/subquery_in.q.out index 66840ec..abb0057 100644 --- ql/src/test/results/clientpositive/spark/subquery_in.q.out +++ ql/src/test/results/clientpositive/spark/subquery_in.q.out @@ -331,7 +331,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -504,7 +504,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/spark/union_ppr.q.out ql/src/test/results/clientpositive/spark/union_ppr.q.out index e951283..07ced03 100644 --- ql/src/test/results/clientpositive/spark/union_ppr.q.out +++ ql/src/test/results/clientpositive/spark/union_ppr.q.out @@ -87,25 +87,29 @@ TOK_QUERY '2008-04-08' TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - ds + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + ds TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - hr + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + hr STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out index 22b0fea..3c88c95 100644 --- ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out +++ ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out @@ -446,7 +446,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out index 426527f..f9b7caf 100644 --- ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out +++ ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out @@ -157,8 +157,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -183,8 +184,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -196,8 +198,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -211,8 +214,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -312,7 +316,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -341,7 +345,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -510,10 +514,11 @@ 
TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - j - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + j + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -546,8 +551,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz @@ -734,7 +740,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -763,7 +769,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -883,8 +889,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -992,7 +999,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1110,8 +1117,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1136,8 +1144,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1149,8 +1158,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -1164,8 +1174,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -1265,7 +1276,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1294,7 +1305,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1439,8 +1450,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1465,8 +1477,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1478,8 +1491,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -1502,8 +1516,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz @@ -1599,7 +1614,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1628,7 +1643,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1776,8 +1791,9 @@ TOK_QUERY p_mfgr 
TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1802,8 +1818,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1815,8 +1832,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -1839,8 +1857,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz TOK_GROUPBY TOK_TABLE_OR_COL @@ -1943,7 +1962,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1981,7 +2000,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -2124,8 +2143,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABREF TOK_TABNAME part_orc @@ -2309,7 +2329,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -2449,8 +2469,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name = . 
TOK_TABLE_OR_COL @@ -2667,7 +2688,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -2768,11 +2789,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABSORTCOLNAMEDESC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_LAST + TOK_TABLE_OR_COL + p_size TOK_INSERT TOK_DESTINATION TOK_DIR @@ -2797,11 +2820,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABSORTCOLNAMEDESC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_LAST + TOK_TABLE_OR_COL + p_size r @@ -2832,7 +2857,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -2911,7 +2936,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2940,7 +2965,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -3064,8 +3089,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3090,8 +3116,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3103,8 +3130,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3118,8 +3146,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3154,7 +3183,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -3234,7 +3263,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3264,7 +3293,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3405,8 +3434,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3431,8 +3461,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3444,8 +3475,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY 
TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3459,8 +3491,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3560,7 +3593,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3589,7 +3622,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3736,11 +3769,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3765,8 +3800,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3778,8 +3814,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3793,8 +3830,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3895,7 +3933,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3909,7 +3947,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3917,7 +3955,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3947,7 +3985,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3955,7 +3993,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3984,7 +4022,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4138,8 +4176,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -4163,8 +4202,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name cd 
TOK_SELEXPR TOK_TABLE_OR_COL @@ -4187,8 +4227,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding 2 @@ -4310,7 +4351,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4339,7 +4380,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4488,8 +4529,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABREF TOK_TABNAME part_orc @@ -4530,10 +4572,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name r TOK_SELEXPR TOK_FUNCTION @@ -4547,10 +4590,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -4568,10 +4612,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name cd TOK_SELEXPR . @@ -4594,10 +4639,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name TOK_WINDOWRANGE preceding unbounded @@ -4634,10 +4680,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name deltaSz @@ -4801,7 +4848,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4853,7 +4900,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -5009,8 +5056,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -5119,7 +5167,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -5268,8 +5316,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -5302,8 +5351,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_brand + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_brand TOK_WINDOWRANGE preceding 2 @@ -5414,7 +5464,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col0 + order by: _col0 ASC NULLS FIRST output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: @@ -5443,7 +5493,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -5616,8 +5666,9 @@ TOK_QUERY 
p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_TAB @@ -5643,8 +5694,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -5656,8 +5708,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -5671,8 +5724,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -5707,8 +5761,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_size TOK_WINDOWVALUES preceding 5 @@ -5725,11 +5780,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -5741,11 +5798,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -5757,11 +5816,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name cud TOK_SELEXPR TOK_FUNCTION @@ -5782,11 +5843,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding 2 @@ -5892,7 +5955,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -5963,7 +6026,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -6002,7 +6065,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3, _col2 + order by: _col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST partition by: _col3 raw input shape: window functions: @@ -6079,7 +6142,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE @@ -6106,7 +6169,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE @@ -6348,8 +6411,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6358,11 +6422,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + 
TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6371,11 +6437,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -6524,14 +6592,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -6545,7 +6613,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6553,7 +6621,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6583,7 +6651,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6591,7 +6659,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6620,7 +6688,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -6799,8 +6867,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6809,19 +6878,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -6843,8 +6915,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -6856,8 +6929,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -6874,8 +6948,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -6977,14 +7052,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: 
string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7013,7 +7088,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7042,7 +7117,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7071,7 +7146,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -7248,19 +7323,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -7282,8 +7360,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -7295,8 +7374,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -7313,8 +7393,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name s1 @@ -7411,14 +7492,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7447,14 +7528,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7483,7 +7564,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -7660,19 +7741,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC 
TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7681,11 +7765,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -7835,14 +7921,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7871,7 +7957,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7885,7 +7971,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7916,7 +8002,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7946,7 +8032,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -8123,19 +8209,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -8159,11 +8248,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -8177,11 +8268,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -8200,11 +8293,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -8224,11 +8319,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -8329,7 +8426,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 
+ order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8343,7 +8440,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8351,7 +8448,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8381,7 +8478,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8389,7 +8486,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8418,7 +8515,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -8589,11 +8686,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -8615,8 +8714,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -8628,8 +8728,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -8646,8 +8747,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWVALUES preceding unbounded @@ -8665,8 +8767,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWVALUES preceding unbounded @@ -8767,14 +8870,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8788,7 +8891,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8819,7 +8922,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8849,7 +8952,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - 
order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/subquery_in.q.out ql/src/test/results/clientpositive/subquery_in.q.out index 8f5c43c..c351b22 100644 --- ql/src/test/results/clientpositive/subquery_in.q.out +++ ql/src/test/results/clientpositive/subquery_in.q.out @@ -274,7 +274,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -455,7 +455,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/subquery_in_having.q.out ql/src/test/results/clientpositive/subquery_in_having.q.out index d27304f..e623299 100644 --- ql/src/test/results/clientpositive/subquery_in_having.q.out +++ ql/src/test/results/clientpositive/subquery_in_having.q.out @@ -1336,7 +1336,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/subquery_notin.q.out ql/src/test/results/clientpositive/subquery_notin.q.out index 090bdec..6ab31ec 100644 --- ql/src/test/results/clientpositive/subquery_notin.q.out +++ ql/src/test/results/clientpositive/subquery_notin.q.out @@ -342,7 +342,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -498,7 +498,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -626,7 +626,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -793,7 +793,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -954,7 +954,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1146,7 +1146,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index 7aa2375..f4c440f 100644 --- ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -227,7 +227,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -396,7 +396,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS 
FIRST partition by: _col2 raw input shape: window functions: @@ -832,7 +832,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -988,7 +988,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/tez/ctas.q.out ql/src/test/results/clientpositive/tez/ctas.q.out index 74e8b98..d11b1a6 100644 --- ql/src/test/results/clientpositive/tez/ctas.q.out +++ ql/src/test/results/clientpositive/tez/ctas.q.out @@ -722,11 +722,13 @@ TOK_CREATETABLE value TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - key + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + key TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - value + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + value TOK_LIMIT 10 diff --git ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out index 576d296..4e54632 100644 --- ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out +++ ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out @@ -1215,7 +1215,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__") @@ -1256,7 +1256,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select count(*) from over1k_part_orc @@ -2255,7 +2255,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2296,7 +2296,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc @@ -2471,7 +2471,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2512,7 +2512,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc diff --git 
ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out index a28f0a4..d8d46f8 100644 --- ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out +++ ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out @@ -1128,7 +1128,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__") @@ -1169,7 +1169,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select count(*) from over1k_part @@ -2152,7 +2152,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2193,7 +2193,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select * from over1k_part_buck_sort2 @@ -2295,7 +2295,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__") @@ -2336,7 +2336,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat Compressed: No Num Buckets: 1 Bucket Columns: [si] -Sort Columns: [Order(col:f, order:1)] +Sort Columns: [Order(col:f, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: select * from over1k_part_buck_sort2 diff --git ql/src/test/results/clientpositive/tez/explainuser_1.q.out ql/src/test/results/clientpositive/tez/explainuser_1.q.out index b39d2fc..cb9cae4 100644 --- ql/src/test/results/clientpositive/tez/explainuser_1.q.out +++ ql/src/test/results/clientpositive/tez/explainuser_1.q.out @@ -2300,7 +2300,7 @@ Stage-0 Filter Operator [FIL_25] (rows=26 width=491) predicate:first_value_window_0 is not null PTF Operator [PTF_10] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col5","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col5 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_9] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 4 [SIMPLE_EDGE] @@ -2667,7 +2667,7 @@ Stage-0 Select Operator [SEL_4] (rows=20 width=52) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"] PTF Operator [PTF_3] (rows=20 width=459) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"0","partition by:":"0"}] + Function 
definitions:[{},{"name:":"windowingtablefunction","order by:":"0 ASC NULLS FIRST","partition by:":"0"}] Select Operator [SEL_2] (rows=20 width=459) Output:["_col2","_col3"] <-Map 1 [SIMPLE_EDGE] @@ -2694,7 +2694,7 @@ Stage-0 Select Operator [SEL_4] (rows=20 width=52) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"] PTF Operator [PTF_3] (rows=20 width=459) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"0","partition by:":"0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"0 ASC NULLS FIRST","partition by:":"0"}] Select Operator [SEL_2] (rows=20 width=459) Output:["_col2","_col3"] <-Map 1 [SIMPLE_EDGE] @@ -2721,7 +2721,7 @@ Stage-0 Select Operator [SEL_4] (rows=20 width=64) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"] PTF Operator [PTF_3] (rows=20 width=612) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col0"}] Select Operator [SEL_2] (rows=20 width=612) Output:["_col0","_col1","_col2","_col3"] <-Map 1 [SIMPLE_EDGE] @@ -2748,7 +2748,7 @@ Stage-0 Select Operator [SEL_4] (rows=25 width=179) Output:["_col0","_col1","_col2"] PTF Operator [PTF_3] (rows=25 width=443) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col0"}] Select Operator [SEL_2] (rows=25 width=443) Output:["_col0","_col1"] <-Map 1 [SIMPLE_EDGE] @@ -4322,14 +4322,14 @@ Stage-0 Select Operator [SEL_7] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_6] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_5] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -4368,14 +4368,14 @@ Stage-0 Select Operator [SEL_14] (rows=29 width=227) Output:["_col0","_col1","_col2","_col3"] PTF Operator [PTF_13] (rows=29 width=223) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_12] (rows=29 width=223) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_11] PartitionCols:_col2 PTF Operator [PTF_10] (rows=29 width=223) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_9] (rows=29 width=223) 
Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] @@ -4430,14 +4430,14 @@ Stage-0 Select Operator [SEL_7] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_6] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_5] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -4481,14 +4481,14 @@ Stage-0 Select Operator [SEL_7] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] PTF Operator [PTF_6] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_5] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] @@ -4535,7 +4535,7 @@ Stage-0 Select Operator [SEL_12] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"] PTF Operator [PTF_11] (rows=26 width=223) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col0"}] Group By Operator [GBY_8] (rows=26 width=223) Output:["_col0","_col1","_col2"],keys:KEY._col0, KEY._col1, KEY._col2 <-Reducer 3 [SIMPLE_EDGE] @@ -4549,7 +4549,7 @@ Stage-0 Select Operator [SEL_4] (rows=26 width=491) Output:["_col1","_col2","_col5"] PTF Operator [PTF_3] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] @@ -4595,7 +4595,7 @@ Stage-0 <-Filter Operator [FIL_14] (rows=26 width=887) predicate:_col0 is not null PTF Operator [PTF_4] (rows=26 width=887) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_3] (rows=26 width=887) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"] <-Map 1 
[SIMPLE_EDGE] @@ -4633,21 +4633,21 @@ Stage-0 Select Operator [SEL_8] (rows=26 width=227) Output:["_col0","_col1","_col2","_col3"] PTF Operator [PTF_7] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1, _col5(DESC)","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST, _col5 DESC NULLS LAST","partition by:":"_col2"}] Select Operator [SEL_6] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_5] PartitionCols:_col2 PTF Operator [PTF_4] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col1, _col5(DESC)","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col1 ASC NULLS FIRST, _col5 DESC NULLS LAST","partition by:":"_col2"}}] Select Operator [SEL_3] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] SHUFFLE [RS_2] PartitionCols:p_mfgr PTF Operator [PTF_1] (rows=26 width=223) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"p_name, p_size(DESC)","partition by:":"p_mfgr"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"p_name ASC NULLS FIRST, p_size DESC NULLS LAST","partition by:":"p_mfgr"}}] TableScan [TS_0] (rows=26 width=223) default@part,part,Tbl:COMPLETE,Col:COMPLETE,Output:["p_name","p_mfgr","p_size"] @@ -4684,21 +4684,21 @@ Stage-0 Select Operator [SEL_8] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_7] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_6] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_5] PartitionCols:_col2 PTF Operator [PTF_4] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_3] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] SHUFFLE [RS_2] PartitionCols:p_mfgr PTF Operator [PTF_1] (rows=26 width=231) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"p_name","partition by:":"p_mfgr"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"p_name ASC NULLS FIRST","partition by:":"p_mfgr"}}] TableScan [TS_0] (rows=26 width=231) default@part,part,Tbl:COMPLETE,Col:COMPLETE,Output:["p_name","p_mfgr","p_size","p_retailprice"] @@ -4735,14 +4735,14 @@ Stage-0 Select Operator [SEL_7] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_6] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_5] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] 
(rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -4787,23 +4787,23 @@ Stage-0 Select Operator [SEL_11] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_10] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_9] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_8] PartitionCols:_col2 PTF Operator [PTF_7] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2(DESC), _col1","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2(DESC), _col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2 DESC NULLS LAST, _col1 ASC NULLS FIRST","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 DESC NULLS LAST, _col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_6] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_5] PartitionCols:_col2 PTF Operator [PTF_4] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2(DESC), _col1","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2(DESC), _col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2 DESC NULLS LAST, _col1 ASC NULLS FIRST","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 DESC NULLS LAST, _col1 ASC NULLS FIRST","partition by:":"_col2"}}] PTF Operator [PTF_3] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2(DESC), _col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 DESC NULLS LAST, _col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -4853,14 +4853,14 @@ Stage-0 Select Operator [SEL_7] (rows=26 width=235) Output:["_col0","_col1","_col2","_col3"] PTF Operator [PTF_6] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_5] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 
width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -4908,7 +4908,7 @@ Stage-0 Select Operator [SEL_13] (rows=29 width=259) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"] PTF Operator [PTF_12] (rows=29 width=767) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_11] (rows=29 width=767) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] @@ -4926,7 +4926,7 @@ Stage-0 <-Filter Operator [FIL_18] (rows=26 width=503) predicate:_col0 is not null PTF Operator [PTF_4] (rows=26 width=503) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_3] (rows=26 width=503) Output:["_col0","_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -5054,14 +5054,14 @@ Stage-4 Select Operator [SEL_7] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_6] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_5] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] (rows=26 width=499) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=499) Output:["_col1","_col2","_col5","_col7"] <-Map 1 [SIMPLE_EDGE] @@ -5075,7 +5075,7 @@ Stage-4 Select Operator [SEL_17] (rows=26 width=247) Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"] PTF Operator [PTF_16] (rows=26 width=499) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3, _col2","partition by:":"_col3"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST","partition by:":"_col3"}] Select Operator [SEL_15] (rows=26 width=499) Output:["_col0","_col2","_col3","_col6"] <-Reducer 4 [SIMPLE_EDGE] @@ -5084,7 +5084,7 @@ Stage-4 Select Operator [SEL_13] (rows=26 width=491) Output:["_col1","_col2","_col5","sum_window_0"] PTF Operator [PTF_12] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col5","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col5 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_11] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] @@ -5148,23 +5148,23 @@ Stage-0 Select Operator [SEL_11] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_10] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col2, _col1","partition by:":"_col2, _col1"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col2 ASC NULLS FIRST, _col1 ASC 
NULLS FIRST","partition by:":"_col2, _col1"}] Select Operator [SEL_9] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_8] PartitionCols:_col2, _col1 PTF Operator [PTF_7] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2, _col1","partition by:":"_col2, _col1"}},{"Partition table definition":{"name:":"noop","order by:":"_col2, _col1","partition by:":"_col2, _col1"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}}] Select Operator [SEL_6] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_5] PartitionCols:_col2, _col1 PTF Operator [PTF_4] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2, _col1","partition by:":"_col2, _col1"}},{"Partition table definition":{"name:":"noop","order by:":"_col2, _col1","partition by:":"_col2, _col1"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noopwithmap","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}}] PTF Operator [PTF_3] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] @@ -5224,28 +5224,28 @@ Stage-0 Select Operator [SEL_13] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_12] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_11] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 4 [SIMPLE_EDGE] SHUFFLE [RS_10] PartitionCols:_col2 PTF Operator [PTF_9] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_8] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_7] PartitionCols:_col2 PTF Operator [PTF_6] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2, _col1","partition by:":"_col2, _col1"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}}] Select Operator [SEL_5] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] 
SHUFFLE [RS_4] PartitionCols:_col2, _col1 PTF Operator [PTF_3] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_2] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] @@ -5300,21 +5300,21 @@ Stage-0 Select Operator [SEL_10] (rows=26 width=239) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] PTF Operator [PTF_9] (rows=26 width=491) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1","partition by:":"_col2"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"_col1 ASC NULLS FIRST","partition by:":"_col2"}] Select Operator [SEL_8] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 3 [SIMPLE_EDGE] SHUFFLE [RS_7] PartitionCols:_col2 PTF Operator [PTF_6] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2","partition by:":"_col2"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST","partition by:":"_col2"}}] Select Operator [SEL_5] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Reducer 2 [SIMPLE_EDGE] SHUFFLE [RS_4] PartitionCols:_col2 PTF Operator [PTF_3] (rows=26 width=491) - Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2, _col1","partition by:":"_col2, _col1"}},{"Partition table definition":{"name:":"noop","order by:":"_col2, _col1","partition by:":"_col2, _col1"}}] + Function definitions:[{},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}},{"Partition table definition":{"name:":"noop","order by:":"_col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST","partition by:":"_col2, _col1"}}] Select Operator [SEL_2] (rows=26 width=491) Output:["_col1","_col2","_col5"] <-Map 1 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/tez/orc_analyze.q.out ql/src/test/results/clientpositive/tez/orc_analyze.q.out index 87855fa..9b7e7b7 100644 --- ql/src/test/results/clientpositive/tez/orc_analyze.q.out +++ ql/src/test/results/clientpositive/tez/orc_analyze.q.out @@ -917,7 +917,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -960,7 +960,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan @@ -1015,7 +1015,7 @@ 
OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -1058,7 +1058,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan @@ -1113,7 +1113,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -1156,7 +1156,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: drop table orc_create_people @@ -1262,7 +1262,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: desc formatted orc_create_people partition(state="Or") @@ -1305,7 +1305,7 @@ OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat Compressed: No Num Buckets: 4 Bucket Columns: [first_name] -Sort Columns: [Order(col:last_name, order:1)] +Sort Columns: [Order(col:last_name, order:1, nullOrder:0)] Storage Desc Params: serialization.format 1 PREHOOK: query: drop table orc_create_people diff --git ql/src/test/results/clientpositive/tez/order_null.q.out ql/src/test/results/clientpositive/tez/order_null.q.out new file mode 100644 index 0000000..4899946 --- /dev/null +++ ql/src/test/results/clientpositive/tez/order_null.q.out @@ -0,0 +1,222 @@ +PREHOOK: query: create table src_null (a int, b string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_null +POSTHOOK: query: create table src_null (a int, b string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_null +PREHOOK: query: insert into src_null values (1, 'A') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (1, 'A') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (null, null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (null, null) 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (2, 'B') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__3 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (2, 'B') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__3 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (2, 'A') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__4 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (2, 'A') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__4 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (2, null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__5 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (2, null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__5 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: insert into src_null values (3, null) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__6 +PREHOOK: Output: default@src_null +POSTHOOK: query: insert into src_null values (3, null) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__6 +POSTHOOK: Output: default@src_null +POSTHOOK: Lineage: src_null.a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: src_null.b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +1 A +2 NULL +2 B +2 A +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +3 NULL +2 NULL +2 B +2 A +1 A +NULL NULL 
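The pair of result sets above pins down the default null placement this test exercises: with no NULLS clause, an ascending ORDER BY returns the (NULL, NULL) row first and a descending ORDER BY returns it last, consistent with the explicit NULLS FIRST / NULLS LAST queries later in this file. A minimal HiveQL sketch of those semantics — the queries mirror the src_null tests in this file, and the expected orderings are taken from the masked output above and below, not from a separate run:

SELECT x.* FROM src_null x ORDER BY a ASC;   -- default for ASC: the NULL key sorts first
SELECT x.* FROM src_null x ORDER BY a DESC;  -- default for DESC: the NULL key sorts last
SELECT x.* FROM src_null x ORDER BY a ASC NULLS LAST, b DESC;  -- per-key override: NULLs in a move to the end, while b keeps DESC's nulls-last default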
+PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +2 NULL +3 NULL +1 A +2 A +2 B +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 B +1 A +2 A +NULL NULL +2 NULL +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls first +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls first +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +1 A +2 NULL +2 B +2 A +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc nulls first +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a desc nulls first +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +NULL NULL +3 NULL +2 NULL +2 B +2 A +1 A +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +1 A +2 A +2 B +NULL NULL +2 NULL +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 B +1 A +2 A +NULL NULL +2 NULL +3 NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY a asc nulls last, b desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +1 A +2 B +2 A +2 NULL +3 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b desc nulls last, a desc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 B +2 A +1 A +3 NULL +2 NULL +NULL NULL +PREHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last +PREHOOK: type: QUERY +PREHOOK: Input: default@src_null +#### A masked pattern was here #### +POSTHOOK: query: SELECT x.* FROM src_null x ORDER BY b asc nulls first, a asc nulls last +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_null +#### A masked pattern was here #### +2 NULL +3 NULL +NULL NULL +1 A +2 A +2 B diff --git ql/src/test/results/clientpositive/tez/ptf.q.out ql/src/test/results/clientpositive/tez/ptf.q.out index 9281f17..9cc51a9 100644 --- 
ql/src/test/results/clientpositive/tez/ptf.q.out +++ ql/src/test/results/clientpositive/tez/ptf.q.out @@ -62,7 +62,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -88,7 +88,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -267,7 +267,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -293,7 +293,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -419,7 +419,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -543,7 +543,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -569,7 +569,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -721,7 +721,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -747,7 +747,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -902,7 +902,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -937,7 +937,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -1098,7 +1098,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -1269,7 +1269,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -1377,7 +1377,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -1404,7 +1404,7 
+1404,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1430,7 +1430,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -1549,7 +1549,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -1577,7 +1577,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1604,7 +1604,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1752,7 +1752,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1778,7 +1778,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1929,7 +1929,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1943,7 +1943,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1951,7 +1951,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1978,7 +1978,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1986,7 +1986,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2(DESC), _col1 + order by: _col2 DESC NULLS LAST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2012,7 +2012,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2170,7 +2170,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape:
_col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2196,7 +2196,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2364,7 +2364,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2409,7 +2409,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2572,7 +2572,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2738,7 +2738,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col0 + order by: _col0 ASC NULLS FIRST output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: @@ -2764,7 +2764,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -2952,7 +2952,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -2984,7 +2984,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3036,7 +3036,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3072,7 +3072,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3, _col2 + order by: _col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST partition by: _col3 raw input shape: window functions: @@ -3340,14 +3340,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3361,7 +3361,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3369,7 +3369,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3396,7 +3396,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC 
NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3404,7 +3404,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3430,7 +3430,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -3612,14 +3612,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3645,7 +3645,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3671,7 +3671,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3697,7 +3697,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3874,14 +3874,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -3907,14 +3907,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3940,7 +3940,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4118,14 +4118,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4151,7 +4151,7 @@ STAGE PLANS: Partition 
table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4165,7 +4165,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4193,7 +4193,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4220,7 +4220,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -4399,7 +4399,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4413,7 +4413,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4421,7 +4421,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4448,7 +4448,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4456,7 +4456,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -4482,7 +4482,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -4655,14 +4655,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4676,7 +4676,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -4704,7 +4704,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw 
input shape: @@ -4731,7 +4731,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/tez/ptf_matchpath.q.out ql/src/test/results/clientpositive/tez/ptf_matchpath.q.out index af88634..8ca3196 100644 --- ql/src/test/results/clientpositive/tez/ptf_matchpath.q.out +++ ql/src/test/results/clientpositive/tez/ptf_matchpath.q.out @@ -101,7 +101,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col2, _col3, _col4 + order by: _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: _col6 raw input shape: @@ -228,7 +228,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col6, _col2, _col3, _col4 + order by: _col6 ASC NULLS FIRST, _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: 0 raw input shape: @@ -352,7 +352,7 @@ STAGE PLANS: input alias: ptf_1 arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath' name: matchpath - order by: _col6, _col2, _col3, _col4 + order by: _col6 ASC NULLS FIRST, _col2 ASC NULLS FIRST, _col3 ASC NULLS FIRST, _col4 ASC NULLS FIRST output shape: tpath: int partition by: 0 raw input shape: diff --git ql/src/test/results/clientpositive/tez/ptf_streaming.q.out ql/src/test/results/clientpositive/tez/ptf_streaming.q.out index 4b5ff70..6bffd7e 100644 --- ql/src/test/results/clientpositive/tez/ptf_streaming.q.out +++ ql/src/test/results/clientpositive/tez/ptf_streaming.q.out @@ -62,7 +62,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -88,7 +88,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -267,7 +267,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -293,7 +293,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -435,7 +435,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -560,7 +560,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int 
partition by: p_mfgr raw input shape: @@ -587,7 +587,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -613,7 +613,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -732,7 +732,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -760,7 +760,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -787,7 +787,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -938,7 +938,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -952,7 +952,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -960,7 +960,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -987,7 +987,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -995,7 +995,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1021,7 +1021,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1174,7 +1174,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1188,7 +1188,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double 
partition by: _col2 raw input shape: @@ -1196,7 +1196,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1223,7 +1223,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1231,7 +1231,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1257,7 +1257,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1410,7 +1410,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1424,7 +1424,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1432,7 +1432,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1459,7 +1459,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1467,7 +1467,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1493,7 +1493,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1663,7 +1663,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noopstreaming - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1708,7 +1708,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1894,14 +1894,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input 
alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1915,7 +1915,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1923,7 +1923,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1950,7 +1950,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1958,7 +1958,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -1984,7 +1984,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -2166,14 +2166,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2199,7 +2199,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2225,7 +2225,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2251,7 +2251,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -2426,14 +2426,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2447,7 +2447,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2475,7 
+2475,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmapstreaming - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -2502,7 +2502,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/tez/subquery_in.q.out ql/src/test/results/clientpositive/tez/subquery_in.q.out index 7cd4113..67decae 100644 --- ql/src/test/results/clientpositive/tez/subquery_in.q.out +++ ql/src/test/results/clientpositive/tez/subquery_in.q.out @@ -333,7 +333,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -507,7 +507,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out index 431e334..7eb9a92 100644 --- ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out +++ ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out @@ -157,8 +157,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -183,8 +184,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -196,8 +198,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -211,8 +214,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -313,7 +317,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -342,7 +346,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -511,10 +515,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - j - p_name + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + j + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -547,8 +552,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz @@ -737,7 +743,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -766,7 +772,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -886,8 +892,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -996,7 +1003,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1114,8 +1121,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1140,8 +1148,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1153,8 +1162,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -1168,8 +1178,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -1270,7 +1281,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1299,7 +1310,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1444,8 +1455,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1470,8 +1482,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1483,8 +1496,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -1507,8 +1521,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz @@ -1605,7 +1620,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1634,7 +1649,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1782,8 +1797,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name 
TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1808,8 +1824,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1821,8 +1838,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -1845,8 +1863,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz TOK_GROUPBY TOK_TABLE_OR_COL @@ -1950,7 +1969,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1988,7 +2007,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -2131,8 +2150,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABREF TOK_TABNAME part_orc @@ -2317,7 +2337,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -2458,8 +2478,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name = . TOK_TABLE_OR_COL @@ -2678,7 +2699,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -2779,11 +2800,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABSORTCOLNAMEDESC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_LAST + TOK_TABLE_OR_COL + p_size TOK_INSERT TOK_DESTINATION TOK_DIR @@ -2808,11 +2831,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABSORTCOLNAMEDESC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_LAST + TOK_TABLE_OR_COL + p_size r @@ -2844,7 +2869,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -2923,7 +2948,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2952,7 +2977,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -3076,8 +3101,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3102,8 
+3128,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3115,8 +3142,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3130,8 +3158,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3167,7 +3196,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -3247,7 +3276,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3277,7 +3306,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3418,8 +3447,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3444,8 +3474,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3457,8 +3488,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3472,8 +3504,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3574,7 +3607,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3603,7 +3636,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3750,11 +3783,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3779,8 +3814,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3792,8 +3828,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3807,8 +3844,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3910,7 +3948,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3924,7 
+3962,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3932,7 +3970,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3962,7 +4000,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3970,7 +4008,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3999,7 +4037,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4153,8 +4191,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -4178,8 +4217,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name cd TOK_SELEXPR TOK_TABLE_OR_COL @@ -4202,8 +4242,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding 2 @@ -4326,7 +4367,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4355,7 +4396,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4504,8 +4545,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABREF TOK_TABNAME part_orc @@ -4546,10 +4588,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name r TOK_SELEXPR TOK_FUNCTION @@ -4563,10 +4606,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -4584,10 +4628,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name cd TOK_SELEXPR . @@ -4610,10 +4655,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name TOK_WINDOWRANGE preceding unbounded @@ -4650,10 +4696,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + abc + p_name deltaSz @@ -4818,7 +4865,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4871,7 +4918,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -5027,8 +5074,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -5138,7 +5186,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -5287,8 +5335,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -5321,8 +5370,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_brand + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_brand TOK_WINDOWRANGE preceding 2 @@ -5434,7 +5484,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col0 + order by: _col0 ASC NULLS FIRST output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: @@ -5463,7 +5513,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -5636,8 +5686,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_TAB @@ -5663,8 +5714,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -5676,8 +5728,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -5691,8 +5744,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -5727,8 +5781,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_size TOK_WINDOWVALUES preceding 5 @@ -5745,11 +5800,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -5761,11 +5818,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -5777,11 +5836,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name cud TOK_SELEXPR TOK_FUNCTION @@ -5802,11 +5863,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - 
p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding 2 @@ -5913,7 +5976,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -5950,7 +6013,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -6021,7 +6084,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -6060,7 +6123,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3, _col2 + order by: _col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST partition by: _col3 raw input shape: window functions: @@ -6355,8 +6418,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6365,11 +6429,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6378,11 +6444,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -6532,14 +6600,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -6553,7 +6621,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6561,7 +6629,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6591,7 +6659,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6599,7 +6667,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -6628,7 +6696,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order 
by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -6807,8 +6875,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -6817,19 +6886,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -6851,8 +6923,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -6864,8 +6937,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -6882,8 +6956,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -6986,14 +7061,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7022,7 +7097,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7051,7 +7126,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7080,7 +7155,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -7257,19 +7332,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -7291,8 +7369,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -7304,8 +7383,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -7322,8 +7402,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name s1 @@ -7421,14 +7502,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, 
_col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7457,14 +7538,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7493,7 +7574,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -7670,19 +7751,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7691,11 +7775,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -7846,14 +7932,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7882,7 +7968,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7896,7 +7982,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7927,7 +8013,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7957,7 +8043,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -8134,19 +8220,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - 
TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -8170,11 +8259,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -8188,11 +8279,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -8211,11 +8304,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -8235,11 +8330,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -8341,7 +8438,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8355,7 +8452,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8363,7 +8460,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8393,7 +8490,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8401,7 +8498,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8430,7 +8527,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -8601,11 +8698,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -8627,8 +8726,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -8640,8 +8740,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -8658,8 +8759,9 @@ TOK_QUERY p_mfgr 
TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWVALUES preceding unbounded @@ -8677,8 +8779,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWVALUES preceding unbounded @@ -8780,14 +8883,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8801,7 +8904,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8832,7 +8935,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8862,7 +8965,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/tez/windowing_gby.q.out ql/src/test/results/clientpositive/tez/windowing_gby.q.out index 2af3e58..d058d7a 100644 --- ql/src/test/results/clientpositive/tez/windowing_gby.q.out +++ ql/src/test/results/clientpositive/tez/windowing_gby.q.out @@ -26,7 +26,7 @@ Stage-0 Select Operator [SEL_17] (rows=6758 width=215) Output:["_col0"] PTF Operator [PTF_16] (rows=6758 width=215) - Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(UDFToDouble(_col1) / UDFToDouble(_col2))","partition by:":"0"}] + Function definitions:[{},{"name:":"windowingtablefunction","order by:":"(UDFToDouble(_col1) / UDFToDouble(_col2)) ASC NULLS FIRST","partition by:":"0"}] Select Operator [SEL_15] (rows=6758 width=215) Output:["_col1","_col2"] <-Reducer 3 [SIMPLE_EDGE] diff --git ql/src/test/results/clientpositive/union_ppr.q.out ql/src/test/results/clientpositive/union_ppr.q.out index b1a3bcf..8ed92cd 100644 --- ql/src/test/results/clientpositive/union_ppr.q.out +++ ql/src/test/results/clientpositive/union_ppr.q.out @@ -87,25 +87,29 @@ TOK_QUERY '2008-04-08' TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - key + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + key TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - value + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + value TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - ds + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + A + ds TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - A - hr + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + A + hr STAGE DEPENDENCIES: diff --git ql/src/test/results/clientpositive/union_remove_6_subq.q.out ql/src/test/results/clientpositive/union_remove_6_subq.q.out index fe58686..59ccf49 100644 --- ql/src/test/results/clientpositive/union_remove_6_subq.q.out +++ ql/src/test/results/clientpositive/union_remove_6_subq.q.out @@ -536,7 +536,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/vectorized_ptf.q.out ql/src/test/results/clientpositive/vectorized_ptf.q.out index 479953c..16552ed 100644 --- ql/src/test/results/clientpositive/vectorized_ptf.q.out +++ ql/src/test/results/clientpositive/vectorized_ptf.q.out @@ -157,8 +157,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -183,8 +184,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -196,8 +198,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -211,8 +214,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -306,7 +310,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -382,7 +386,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -551,10 +555,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - j - p_name + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + j + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -587,8 +592,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz @@ -763,7 +769,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -839,7 +845,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -959,8 +965,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1062,7 +1069,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1180,8 +1187,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1206,8 +1214,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1219,8 +1228,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -1234,8 +1244,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -1329,7 +1340,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -1405,7 +1416,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1550,8 +1561,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1576,8 +1588,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1589,8 +1602,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -1613,8 +1627,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz @@ -1704,7 +1719,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -1780,7 +1795,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -1928,8 +1943,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name 
TOK_INSERT TOK_DESTINATION TOK_DIR @@ -1954,8 +1970,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -1967,8 +1984,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -1991,8 +2009,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name deltaSz TOK_GROUPBY TOK_TABLE_OR_COL @@ -2089,7 +2108,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -2174,7 +2193,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -2317,8 +2336,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABREF TOK_TABNAME part_orc @@ -2429,7 +2449,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -2676,8 +2696,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name = . TOK_TABLE_OR_COL @@ -2784,7 +2805,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string partition by: _col2 raw input shape: @@ -3029,11 +3050,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABSORTCOLNAMEDESC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_LAST + TOK_TABLE_OR_COL + p_size TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3058,11 +3081,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABSORTCOLNAMEDESC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_LAST + TOK_TABLE_OR_COL + p_size r @@ -3088,7 +3113,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name, p_size(DESC) + order by: p_name ASC NULLS FIRST, p_size DESC NULLS LAST output shape: p_name: string, p_mfgr: string, p_size: int partition by: p_mfgr raw input shape: @@ -3166,7 +3191,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -3242,7 +3267,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1, _col5(DESC) + order by: _col1 ASC NULLS FIRST, _col5 DESC NULLS LAST partition by: _col2 raw input shape: window functions: @@ -3366,8 +3391,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3392,8 
+3418,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3405,8 +3432,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3420,8 +3448,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3451,7 +3480,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: p_name + order by: p_name ASC NULLS FIRST output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double partition by: p_mfgr raw input shape: @@ -3530,7 +3559,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3607,7 +3636,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -3748,8 +3777,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -3774,8 +3804,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -3787,8 +3818,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -3802,8 +3834,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -3897,7 +3930,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -3973,7 +4006,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4120,11 +4153,13 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -4149,8 +4184,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -4162,8 +4198,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -4177,8 +4214,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -4273,7 +4311,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4287,7 
+4325,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4295,7 +4333,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4372,7 +4410,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4380,7 +4418,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4456,7 +4494,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -4610,8 +4648,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -4635,8 +4674,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name cd TOK_SELEXPR TOK_TABLE_OR_COL @@ -4659,8 +4699,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding 2 @@ -4776,7 +4817,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -4852,7 +4893,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -5001,8 +5042,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_TABREF TOK_TABNAME part_orc @@ -5043,10 +5085,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name r TOK_SELEXPR TOK_FUNCTION @@ -5060,10 +5103,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -5081,10 +5125,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name cd TOK_SELEXPR . @@ -5107,10 +5152,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . + TOK_TABLE_OR_COL + abc + p_name TOK_WINDOWRANGE preceding unbounded @@ -5147,10 +5193,11 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - . - TOK_TABLE_OR_COL - abc - p_name + TOK_NULLS_FIRST + . 
+ TOK_TABLE_OR_COL + abc + p_name deltaSz @@ -5241,7 +5288,7 @@ STAGE PLANS: Partition table definition input alias: abc name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col0: int, _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -5447,7 +5494,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -5603,8 +5650,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -5707,7 +5755,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -5902,8 +5950,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -5936,8 +5985,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_brand + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_brand TOK_WINDOWRANGE preceding 2 @@ -6042,7 +6092,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col0 + order by: _col0 ASC NULLS FIRST output shape: _col0: string, _col1: string, _col2: double partition by: _col0 raw input shape: @@ -6118,7 +6168,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: @@ -6291,8 +6341,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_TAB @@ -6318,8 +6369,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -6331,8 +6383,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -6346,8 +6399,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -6382,8 +6436,9 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_size + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_size TOK_WINDOWVALUES preceding 5 @@ -6400,11 +6455,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -6416,11 +6473,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_FUNCTION @@ -6432,11 +6491,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name cud TOK_SELEXPR TOK_FUNCTION @@ -6457,11 +6518,13 @@ TOK_QUERY p_mfgr TOK_SORTBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - 
p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding 2 @@ -6560,7 +6623,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col1 + order by: _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int, _col7: double partition by: _col2 raw input shape: @@ -6653,7 +6716,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -6788,7 +6851,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -6874,7 +6937,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col3, _col2 + order by: _col3 ASC NULLS FIRST, _col2 ASC NULLS FIRST partition by: _col3 raw input shape: window functions: @@ -7140,8 +7203,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7150,11 +7214,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7163,11 +7229,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -7310,14 +7378,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7331,7 +7399,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7339,7 +7407,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7416,7 +7484,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7424,7 +7492,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -7500,7 +7568,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order 
by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -7679,8 +7747,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -7689,19 +7758,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -7723,8 +7795,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -7736,8 +7809,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -7754,8 +7828,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -7851,14 +7926,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -7934,7 +8009,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8010,7 +8085,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8086,7 +8161,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -8263,19 +8338,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -8297,8 +8375,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -8310,8 +8389,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -8328,8 +8408,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name s1 @@ -8420,14 +8501,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, 
_col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -8503,14 +8584,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -8586,7 +8667,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -8763,19 +8844,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL @@ -8784,11 +8868,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -8932,14 +9018,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -9015,7 +9101,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -9029,7 +9115,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -9107,7 +9193,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -9184,7 +9270,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -9361,19 +9447,22 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - 
TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_PARTITIONINGSPEC TOK_DISTRIBUTEBY TOK_TABLE_OR_COL p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_INSERT TOK_DESTINATION TOK_DIR @@ -9397,11 +9486,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -9415,11 +9506,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -9438,11 +9531,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -9462,11 +9557,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWRANGE preceding unbounded @@ -9561,7 +9658,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -9575,7 +9672,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -9583,7 +9680,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -9660,7 +9757,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -9668,7 +9765,7 @@ STAGE PLANS: Partition table definition input alias: ptf_2 name: noop - order by: _col2 + order by: _col2 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2 raw input shape: @@ -9744,7 +9841,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST partition by: _col2, _col1 raw input shape: window functions: @@ -9915,11 +10012,13 @@ TOK_QUERY p_name TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_mfgr + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_mfgr TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_INSERT TOK_DESTINATION TOK_DIR @@ -9941,8 +10040,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name r TOK_SELEXPR TOK_FUNCTION @@ -9954,8 +10054,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name dr TOK_SELEXPR TOK_TABLE_OR_COL @@ -9972,8 +10073,9 @@ TOK_QUERY p_mfgr 
TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWVALUES preceding unbounded @@ -9991,8 +10093,9 @@ TOK_QUERY p_mfgr TOK_ORDERBY TOK_TABSORTCOLNAMEASC - TOK_TABLE_OR_COL - p_name + TOK_NULLS_FIRST + TOK_TABLE_OR_COL + p_name TOK_WINDOWVALUES preceding unbounded @@ -10087,14 +10190,14 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: Partition table definition input alias: ptf_2 name: noop - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -10108,7 +10211,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -10186,7 +10289,7 @@ STAGE PLANS: Partition table definition input alias: ptf_1 name: noopwithmap - order by: _col2, _col1 + order by: _col2 ASC NULLS FIRST, _col1 ASC NULLS FIRST output shape: _col1: string, _col2: string, _col5: int partition by: _col2, _col1 raw input shape: @@ -10263,7 +10366,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: diff --git ql/src/test/results/clientpositive/windowing_streaming.q.out ql/src/test/results/clientpositive/windowing_streaming.q.out index d3226a1..27dd96f 100644 --- ql/src/test/results/clientpositive/windowing_streaming.q.out +++ ql/src/test/results/clientpositive/windowing_streaming.q.out @@ -84,7 +84,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -155,7 +155,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col1 ASC NULLS FIRST partition by: _col2 raw input shape: window functions: @@ -324,7 +324,7 @@ STAGE PLANS: Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col5 + order by: _col5 ASC NULLS FIRST partition by: _col0 raw input shape: window functions: diff --git serde/if/serde.thrift serde/if/serde.thrift index 40d5f47..0b3804d 100644 --- serde/if/serde.thrift +++ serde/if/serde.thrift @@ -30,6 +30,7 @@ const string SERIALIZATION_NULL_FORMAT = "serialization.null.format" const string SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf" const string SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest" const string SERIALIZATION_SORT_ORDER = "serialization.sort.order" +const string SERIALIZATION_NULL_POSITION = "serialization.null.position"; const string SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object" const string SERIALIZATION_ENCODING = "serialization.encoding" diff --git serde/src/gen/thrift/gen-cpp/serde_constants.cpp serde/src/gen/thrift/gen-cpp/serde_constants.cpp index 243d3b8..75701e2 100644 --- serde/src/gen/thrift/gen-cpp/serde_constants.cpp +++ serde/src/gen/thrift/gen-cpp/serde_constants.cpp @@ -27,6 +27,8 @@ serdeConstants::serdeConstants() { SERIALIZATION_SORT_ORDER 
= "serialization.sort.order"; + SERIALIZATION_NULL_POSITION = "serialization.null.position"; + SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"; SERIALIZATION_ENCODING = "serialization.encoding"; diff --git serde/src/gen/thrift/gen-cpp/serde_constants.h serde/src/gen/thrift/gen-cpp/serde_constants.h index 3566ead..6d85928 100644 --- serde/src/gen/thrift/gen-cpp/serde_constants.h +++ serde/src/gen/thrift/gen-cpp/serde_constants.h @@ -23,6 +23,7 @@ class serdeConstants { std::string SERIALIZATION_ESCAPE_CRLF; std::string SERIALIZATION_LAST_COLUMN_TAKES_REST; std::string SERIALIZATION_SORT_ORDER; + std::string SERIALIZATION_NULL_POSITION; std::string SERIALIZATION_USE_JSON_OBJECTS; std::string SERIALIZATION_ENCODING; std::string FIELD_DELIM; diff --git serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java index 8b3eeb7..5cbbfaa 100644 --- serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java +++ serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/serdeConstants.java @@ -52,6 +52,8 @@ public static final String SERIALIZATION_SORT_ORDER = "serialization.sort.order"; + public static final String SERIALIZATION_NULL_POSITION = "serialization.null.position"; + public static final String SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"; public static final String SERIALIZATION_ENCODING = "serialization.encoding"; diff --git serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php index 8370698..0bc6dd7 100644 --- serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php +++ serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php @@ -26,6 +26,7 @@ final class Constant extends \Thrift\Type\TConstant { static protected $SERIALIZATION_ESCAPE_CRLF; static protected $SERIALIZATION_LAST_COLUMN_TAKES_REST; static protected $SERIALIZATION_SORT_ORDER; + static protected $SERIALIZATION_NULL_POSITION; static protected $SERIALIZATION_USE_JSON_OBJECTS; static protected $SERIALIZATION_ENCODING; static protected $FIELD_DELIM; @@ -97,6 +98,10 @@ final class Constant extends \Thrift\Type\TConstant { return "serialization.sort.order"; } + static protected function init_SERIALIZATION_NULL_POSITION() { + return "serialization.null.position"; + } + static protected function init_SERIALIZATION_USE_JSON_OBJECTS() { return "serialization.use.json.object"; } diff --git serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py index 6ef3bcf..7939791 100644 --- serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py +++ serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py @@ -17,6 +17,7 @@ SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf" SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest" SERIALIZATION_SORT_ORDER = "serialization.sort.order" +SERIALIZATION_NULL_POSITION = "serialization.null.position" SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object" SERIALIZATION_ENCODING = "serialization.encoding" FIELD_DELIM = "field.delim" diff --git serde/src/gen/thrift/gen-rb/serde_constants.rb serde/src/gen/thrift/gen-rb/serde_constants.rb index f98441b..d09e3c2 100644 --- serde/src/gen/thrift/gen-rb/serde_constants.rb +++ serde/src/gen/thrift/gen-rb/serde_constants.rb @@ -23,6 +23,8 @@ 
 SERIALIZATION_LAST_COLUMN_TAKES_REST = %q"serialization.last.column.takes.rest"
 SERIALIZATION_SORT_ORDER = %q"serialization.sort.order"
+SERIALIZATION_NULL_POSITION = %q"serialization.null.position"
+
 SERIALIZATION_USE_JSON_OBJECTS = %q"serialization.use.json.object"
 SERIALIZATION_ENCODING = %q"serialization.encoding"
diff --git serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
index 144ea5a..4e7eb2b 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java
@@ -28,8 +28,6 @@
 import java.util.Map;
 import java.util.Properties;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
@@ -92,16 +90,18 @@
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * BinarySortableSerDe can be used to write data in a way that the data can be
  * compared byte-by-byte with the same order.
  *
- * The data format: NULL: a single byte \0 NON-NULL Primitives: ALWAYS prepend a
- * single byte \1, and then: Boolean: FALSE = \1, TRUE = \2 Byte: flip the
- * sign-bit to make sure negative comes before positive Short: flip the sign-bit
- * to make sure negative comes before positive Int: flip the sign-bit to make
- * sure negative comes before positive Long: flip the sign-bit to make sure
+ * The data format: NULL: a single byte (\0 or \1; see below) NON-NULL Primitives:
+ * ALWAYS prepend a single byte (\0 or \1), and then: Boolean: FALSE = \1, TRUE = \2
+ * Byte: flip the sign-bit to make sure negative comes before positive Short: flip the
+ * sign-bit to make sure negative comes before positive Int: flip the sign-bit to
+ * make sure negative comes before positive Long: flip the sign-bit to make sure
  * negative comes before positive Double: flip the sign-bit for positive double,
  * and all bits for negative double values String: NULL-terminated UTF-8 string,
  * with NULL escaped to \1 \1, and \1 escaped to \1 \2 NON-NULL Complex Types:
@@ -115,14 +115,23 @@
  * field should be sorted ascendingly, and "-" means descendingly. The sub
  * fields in the same top-level field will have the same sort order.
  *
+ * This SerDe takes an additional parameter, SERIALIZATION_NULL_POSITION, which is
+ * a string containing only "a" and "z". The length of the string should be equal
+ * to the number of fields in the top-level struct for serialization. "a" means
+ * that NULL should come first (thus, the single byte is \0 for ascending order,
+ * \1 for descending order), while "z" means that NULL should come last (thus, the
+ * single byte is \1 for ascending order, \0 for descending order).
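+ *
+ * For example, with SERIALIZATION_SORT_ORDER = "+-" and
+ * SERIALIZATION_NULL_POSITION = "za", the first field sorts ascending with
+ * NULLs last (null marker \1), while the second sorts descending with NULLs
+ * first (also null marker \1, which the descending byte inversion then flips).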
*/ @SerDeSpec(schemaProps = { serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES, - serdeConstants.SERIALIZATION_SORT_ORDER}) + serdeConstants.SERIALIZATION_SORT_ORDER, serdeConstants.SERIALIZATION_NULL_POSITION}) public class BinarySortableSerDe extends AbstractSerDe { public static final Logger LOG = LoggerFactory.getLogger(BinarySortableSerDe.class.getName()); + public static final byte ZERO = (byte) 0; + public static final byte ONE = (byte) 1; + List columnNames; List columnTypes; @@ -130,6 +139,8 @@ StructObjectInspector rowObjectInspector; boolean[] columnSortOrderIsDesc; + byte[] columnNullMarker; + byte[] columnNotNullMarker; public static Charset decimalCharSet = Charset.forName("US-ASCII"); @@ -170,6 +181,37 @@ public void initialize(Configuration conf, Properties tbl) columnSortOrderIsDesc[i] = (columnSortOrder != null && columnSortOrder .charAt(i) == '-'); } + + // Null first/last + String columnNullOrder = tbl + .getProperty(serdeConstants.SERIALIZATION_NULL_POSITION); + columnNullMarker = new byte[columnNames.size()]; + columnNotNullMarker = new byte[columnNames.size()]; + for (int i = 0; i < columnSortOrderIsDesc.length; i++) { + if (columnSortOrderIsDesc[i]) { + // Descending + if (columnNullOrder != null && columnNullOrder.charAt(i) == 'a') { + // Null first + columnNullMarker[i] = ONE; + columnNotNullMarker[i] = ZERO; + } else { + // Null last (default for descending order) + columnNullMarker[i] = ZERO; + columnNotNullMarker[i] = ONE; + } + } else { + // Ascending + if (columnNullOrder != null && columnNullOrder.charAt(i) == 'z') { + // Null last + columnNullMarker[i] = ONE; + columnNotNullMarker[i] = ZERO; + } else { + // Null first (default for ascending order) + columnNullMarker[i] = ZERO; + columnNotNullMarker[i] = ONE; + } + } + } } @Override @@ -193,7 +235,7 @@ public Object deserialize(Writable blob) throws SerDeException { try { for (int i = 0; i < columnNames.size(); i++) { row.set(i, deserialize(inputByteBuffer, columnTypes.get(i), - columnSortOrderIsDesc[i], row.get(i))); + columnSortOrderIsDesc[i], columnNullMarker[i], columnNotNullMarker[i], row.get(i))); } } catch (IOException e) { throw new SerDeException(e); @@ -203,14 +245,14 @@ public Object deserialize(Writable blob) throws SerDeException { } static Object deserialize(InputByteBuffer buffer, TypeInfo type, - boolean invert, Object reuse) throws IOException { + boolean invert, byte nullMarker, byte notNullMarker, Object reuse) throws IOException { // Is this field a null? 
byte isNull = buffer.read(invert); - if (isNull == 0) { + if (isNull == nullMarker) { return null; } - assert (isNull == 1); + assert (isNull == notNullMarker); switch (type.getCategory()) { case PRIMITIVE: { @@ -475,7 +517,7 @@ static Object deserialize(InputByteBuffer buffer, TypeInfo type, if (size == r.size()) { r.add(null); } - r.set(size, deserialize(buffer, etype, invert, r.get(size))); + r.set(size, deserialize(buffer, etype, invert, nullMarker, notNullMarker, r.get(size))); size++; } // Remove additional elements if the list is reused @@ -506,8 +548,8 @@ static Object deserialize(InputByteBuffer buffer, TypeInfo type, } // \1 followed by each key and then each value assert (more == 1); - Object k = deserialize(buffer, ktype, invert, null); - Object v = deserialize(buffer, vtype, invert, null); + Object k = deserialize(buffer, ktype, invert, nullMarker, notNullMarker, null); + Object v = deserialize(buffer, vtype, invert, nullMarker, notNullMarker, null); r.put(k, v); } return r; @@ -527,7 +569,7 @@ static Object deserialize(InputByteBuffer buffer, TypeInfo type, // Read one field by one field for (int eid = 0; eid < size; eid++) { r - .set(eid, deserialize(buffer, fieldTypes.get(eid), invert, r + .set(eid, deserialize(buffer, fieldTypes.get(eid), invert, nullMarker, notNullMarker, r .get(eid))); } return r; @@ -540,7 +582,7 @@ static Object deserialize(InputByteBuffer buffer, TypeInfo type, byte tag = buffer.read(invert); r.setTag(tag); r.setObject(deserialize(buffer, utype.getAllUnionObjectTypeInfos().get(tag), - invert, null)); + invert, nullMarker, notNullMarker, null)); return r; } default: { @@ -626,7 +668,8 @@ public Writable serialize(Object obj, ObjectInspector objInspector) throws SerDe for (int i = 0; i < columnNames.size(); i++) { serialize(output, soi.getStructFieldData(obj, fields.get(i)), - fields.get(i).getFieldObjectInspector(), columnSortOrderIsDesc[i]); + fields.get(i).getFieldObjectInspector(), columnSortOrderIsDesc[i], + columnNullMarker[i], columnNotNullMarker[i]); } serializeBytesWritable.set(output.getData(), 0, output.getLength()); @@ -641,14 +684,14 @@ public static void writeByte(RandomAccessOutput buffer, byte b, boolean invert) } static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, - boolean invert) throws SerDeException { + boolean invert, byte nullMarker, byte notNullMarker) throws SerDeException { // Is this field a null? if (o == null) { - writeByte(buffer, (byte) 0, invert); + writeByte(buffer, nullMarker, invert); return; } // This field is not a null. 
- writeByte(buffer, (byte) 1, invert); + writeByte(buffer, notNullMarker, invert); switch (oi.getCategory()) { case PRIMITIVE: { @@ -786,7 +829,7 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, int size = loi.getListLength(o); for (int eid = 0; eid < size; eid++) { writeByte(buffer, (byte) 1, invert); - serialize(buffer, loi.getListElement(o, eid), eoi, invert); + serialize(buffer, loi.getListElement(o, eid), eoi, invert, nullMarker, notNullMarker); } // and \0 to terminate writeByte(buffer, (byte) 0, invert); @@ -801,8 +844,8 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, Map map = moi.getMap(o); for (Map.Entry entry : map.entrySet()) { writeByte(buffer, (byte) 1, invert); - serialize(buffer, entry.getKey(), koi, invert); - serialize(buffer, entry.getValue(), voi, invert); + serialize(buffer, entry.getKey(), koi, invert, nullMarker, notNullMarker); + serialize(buffer, entry.getValue(), voi, invert, nullMarker, notNullMarker); } // and \0 to terminate writeByte(buffer, (byte) 0, invert); @@ -814,7 +857,7 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, for (int i = 0; i < fields.size(); i++) { serialize(buffer, soi.getStructFieldData(o, fields.get(i)), fields.get( - i).getFieldObjectInspector(), invert); + i).getFieldObjectInspector(), invert, nullMarker, notNullMarker); } return; } @@ -823,7 +866,7 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, byte tag = uoi.getTag(o); writeByte(buffer, tag, invert); serialize(buffer, uoi.getField(o), uoi.getObjectInspectors().get(tag), - invert); + invert, nullMarker, notNullMarker); return; } default: { @@ -971,13 +1014,24 @@ public SerDeStats getSerDeStats() { } public static void serializeStruct(Output byteStream, Object[] fieldData, - List fieldOis, boolean[] sortableSortOrders) throws SerDeException { + List fieldOis, boolean[] sortableSortOrders, + byte[] nullMarkers, byte[] notNullMarkers) throws SerDeException { for (int i = 0; i < fieldData.length; i++) { - serialize(byteStream, fieldData[i], fieldOis.get(i), sortableSortOrders[i]); + serialize(byteStream, fieldData[i], fieldOis.get(i), sortableSortOrders[i], + nullMarkers[i], notNullMarkers[i]); } } public boolean[] getSortOrders() { return columnSortOrderIsDesc; } + + public byte[] getNullMarkers() { + return columnNullMarker; + } + + public byte[] getNotNullMarkers() { + return columnNotNullMarker; + } + } diff --git serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java index ec43ae3..73e20a8 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java +++ serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDeWithEndPrefix.java @@ -28,7 +28,7 @@ public static void serializeStruct(Output byteStream, Object[] fieldData, List fieldOis, boolean endPrefix) throws SerDeException { for (int i = 0; i < fieldData.length; i++) { - serialize(byteStream, fieldData[i], fieldOis.get(i), false); + serialize(byteStream, fieldData[i], fieldOis.get(i), false, ZERO, ONE); } if (endPrefix) { if (fieldData[fieldData.length-1]!=null) { diff --git serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java index 7456725..62bcaa5 100644 
--- serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java +++ serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/fast/BinarySortableSerializeWrite.java @@ -23,8 +23,6 @@ import java.sql.Timestamp; import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveIntervalDayTime; @@ -32,10 +30,12 @@ import org.apache.hadoop.hive.common.type.HiveVarchar; import org.apache.hadoop.hive.serde2.ByteStream.Output; import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe; +import org.apache.hadoop.hive.serde2.fast.SerializeWrite; import org.apache.hadoop.hive.serde2.io.DateWritable; import org.apache.hadoop.hive.serde2.io.TimestampWritable; -import org.apache.hadoop.hive.serde2.fast.SerializeWrite; import org.apache.hive.common.util.DateUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /* * Directly serialize, field-by-field, the BinarySortable format. @@ -49,6 +49,9 @@ // The sort order (ascending/descending) for each field. Set to true when descending (invert). private boolean[] columnSortOrderIsDesc; + // Null first/last + private byte[] columnNullMarker; + private byte[] columnNotNullMarker; // Which field we are on. We start with -1 to be consistent in style with // BinarySortableDeserializeRead. @@ -58,20 +61,28 @@ private TimestampWritable tempTimestampWritable; - public BinarySortableSerializeWrite(boolean[] columnSortOrderIsDesc) { + public BinarySortableSerializeWrite(boolean[] columnSortOrderIsDesc, + byte[] columnNullMarker, byte[] columnNotNullMarker) { this(); fieldCount = columnSortOrderIsDesc.length; this.columnSortOrderIsDesc = columnSortOrderIsDesc; + this.columnNullMarker = columnNullMarker; + this.columnNotNullMarker = columnNotNullMarker; } /* * Use this constructor when only ascending sort order is used. + * By default for ascending order, NULL first. */ public BinarySortableSerializeWrite(int fieldCount) { this(); this.fieldCount = fieldCount; columnSortOrderIsDesc = new boolean[fieldCount]; Arrays.fill(columnSortOrderIsDesc, false); + columnNullMarker = new byte[fieldCount]; + Arrays.fill(columnNullMarker, BinarySortableSerDe.ZERO); + columnNotNullMarker = new byte[fieldCount]; + Arrays.fill(columnNotNullMarker, BinarySortableSerDe.ONE); } // Not public since we must have the field count or column sort order information. @@ -112,7 +123,8 @@ public void reset() { */ @Override public void writeNull() throws IOException { - BinarySortableSerDe.writeByte(output, (byte) 0, columnSortOrderIsDesc[++index]); + ++index; + BinarySortableSerDe.writeByte(output, columnNullMarker[index], columnSortOrderIsDesc[index]); } /* @@ -120,10 +132,12 @@ public void writeNull() throws IOException { */ @Override public void writeBoolean(boolean v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.writeByte(output, (byte) (v ? 
2 : 1), invert); } @@ -133,10 +147,12 @@ public void writeBoolean(boolean v) throws IOException { */ @Override public void writeByte(byte v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.writeByte(output, (byte) (v ^ 0x80), invert); } @@ -146,10 +162,12 @@ public void writeByte(byte v) throws IOException { */ @Override public void writeShort(short v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeShort(output, v, invert); } @@ -159,10 +177,12 @@ public void writeShort(short v) throws IOException { */ @Override public void writeInt(int v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeInt(output, v, invert); } @@ -172,10 +192,12 @@ public void writeInt(int v) throws IOException { */ @Override public void writeLong(long v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeLong(output, v, invert); } @@ -185,10 +207,12 @@ public void writeLong(long v) throws IOException { */ @Override public void writeFloat(float vf) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeFloat(output, vf, invert); } @@ -198,10 +222,12 @@ public void writeFloat(float vf) throws IOException { */ @Override public void writeDouble(double vd) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeDouble(output, vd, invert); } @@ -214,20 +240,24 @@ public void writeDouble(double vd) throws IOException { */ @Override public void writeString(byte[] v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. 
- BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeBytes(output, v, 0, v.length, invert); } @Override public void writeString(byte[] v, int start, int length) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeBytes(output, v, start, length, invert); } @@ -257,20 +287,24 @@ public void writeHiveVarchar(HiveVarchar hiveVarchar) throws IOException { */ @Override public void writeBinary(byte[] v) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeBytes(output, v, 0, v.length, invert); } @Override public void writeBinary(byte[] v, int start, int length) { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeBytes(output, v, start, length, invert); } @@ -280,10 +314,12 @@ public void writeBinary(byte[] v, int start, int length) { */ @Override public void writeDate(Date date) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeInt(output, DateWritable.dateToDays(date), invert); } @@ -291,10 +327,12 @@ public void writeDate(Date date) throws IOException { // We provide a faster way to write a date without a Date object. @Override public void writeDate(int dateAsDays) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeInt(output, dateAsDays, invert); } @@ -304,10 +342,12 @@ public void writeDate(int dateAsDays) throws IOException { */ @Override public void writeTimestamp(Timestamp vt) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. 
- BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); tempTimestampWritable.set(vt); BinarySortableSerDe.serializeTimestampWritable(output, tempTimestampWritable, invert); @@ -318,20 +358,24 @@ public void writeTimestamp(Timestamp vt) throws IOException { */ @Override public void writeHiveIntervalYearMonth(HiveIntervalYearMonth viyt) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeHiveIntervalYearMonth(output, viyt, invert); } @Override public void writeHiveIntervalYearMonth(int totalMonths) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeInt(output, totalMonths, invert); } @@ -341,10 +385,12 @@ public void writeHiveIntervalYearMonth(int totalMonths) throws IOException { */ @Override public void writeHiveIntervalDayTime(HiveIntervalDayTime vidt) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. - BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeHiveIntervalDayTime(output, vidt, invert); } @@ -354,10 +400,12 @@ public void writeHiveIntervalDayTime(HiveIntervalDayTime vidt) throws IOExceptio */ @Override public void writeHiveDecimal(HiveDecimal dec, int scale) throws IOException { - final boolean invert = columnSortOrderIsDesc[++index]; + ++index; + + final boolean invert = columnSortOrderIsDesc[index]; // This field is not a null. 
- BinarySortableSerDe.writeByte(output, (byte) 1, invert); + BinarySortableSerDe.writeByte(output, columnNotNullMarker[index], invert); BinarySortableSerDe.serializeHiveDecimal(output, dec, invert); } diff --git serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java index ae476ae..c43eae2 100644 --- serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java +++ serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableFast.java @@ -23,8 +23,6 @@ import java.util.Map; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.serde2.ByteStream.Output; import org.apache.hadoop.hive.serde2.SerDe; @@ -33,20 +31,23 @@ import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableDeserializeRead; import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.ObjectInspectorOptions; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.io.BytesWritable; +import junit.framework.TestCase; + public class TestBinarySortableFast extends TestCase { private void testBinarySortableFast(MyTestPrimitiveClass[] myTestPrimitiveClasses, - boolean[] columnSortOrderIsDesc, SerDe serde, StructObjectInspector rowOI, boolean ascending, + boolean[] columnSortOrderIsDesc, byte[] columnNullMarker, byte[] columnNotNullMarker, + SerDe serde, StructObjectInspector rowOI, boolean ascending, Map primitiveTypeInfoMap) throws Throwable { - BinarySortableSerializeWrite binarySortableSerializeWrite = new BinarySortableSerializeWrite(columnSortOrderIsDesc); + BinarySortableSerializeWrite binarySortableSerializeWrite = + new BinarySortableSerializeWrite(columnSortOrderIsDesc, columnNullMarker, columnNotNullMarker); // Try to serialize @@ -227,15 +228,24 @@ public void testBinarySortableFast() throws Throwable { String fieldTypes = ObjectInspectorUtils.getFieldTypes(rowOI); String order; order = StringUtils.leftPad("", MyTestPrimitiveClass.primitiveCount, '+'); - SerDe serde_ascending = TestBinarySortableSerDe.getSerDe(fieldNames, fieldTypes, order); + String nullPosition; + nullPosition = StringUtils.leftPad("", MyTestPrimitiveClass.primitiveCount, 'a'); + SerDe serde_ascending = TestBinarySortableSerDe.getSerDe(fieldNames, fieldTypes, order, nullPosition); order = StringUtils.leftPad("", MyTestPrimitiveClass.primitiveCount, '-'); - SerDe serde_descending = TestBinarySortableSerDe.getSerDe(fieldNames, fieldTypes, order); + nullPosition = StringUtils.leftPad("", MyTestPrimitiveClass.primitiveCount, 'z'); + SerDe serde_descending = TestBinarySortableSerDe.getSerDe(fieldNames, fieldTypes, order, nullPosition); boolean[] columnSortOrderIsDesc = new boolean[MyTestPrimitiveClass.primitiveCount]; Arrays.fill(columnSortOrderIsDesc, false); - testBinarySortableFast(myTestPrimitiveClasses, columnSortOrderIsDesc, serde_ascending, rowOI, true, 
primitiveTypeInfoMap); + byte[] columnNullMarker = new byte[MyTestPrimitiveClass.primitiveCount]; + Arrays.fill(columnNullMarker, BinarySortableSerDe.ZERO); + byte[] columnNotNullMarker = new byte[MyTestPrimitiveClass.primitiveCount]; + Arrays.fill(columnNotNullMarker, BinarySortableSerDe.ONE); + testBinarySortableFast(myTestPrimitiveClasses, columnSortOrderIsDesc, columnNullMarker, + columnNotNullMarker, serde_ascending, rowOI, true, primitiveTypeInfoMap); Arrays.fill(columnSortOrderIsDesc, true); - testBinarySortableFast(myTestPrimitiveClasses, columnSortOrderIsDesc, serde_descending, rowOI, false, primitiveTypeInfoMap); + testBinarySortableFast(myTestPrimitiveClasses, columnSortOrderIsDesc, columnNullMarker, + columnNotNullMarker, serde_descending, rowOI, false, primitiveTypeInfoMap); } catch (Throwable e) { e.printStackTrace(); throw e; diff --git serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableSerDe.java serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableSerDe.java index af47e6f..72e0917 100644 --- serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableSerDe.java +++ serde/src/test/org/apache/hadoop/hive/serde2/binarysortable/TestBinarySortableSerDe.java @@ -17,18 +17,12 @@ */ package org.apache.hadoop.hive.serde2.binarysortable; -import java.sql.Date; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Properties; import java.util.Random; -import junit.framework.TestCase; - import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeUtils; @@ -40,6 +34,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.io.BytesWritable; +import junit.framework.TestCase; + /** * TestBinarySortableSerDe. 
 *
@@ -66,12 +62,13 @@ public static String hexString(BytesWritable bytes) {
     return sb.toString();
   }
 
-  public static SerDe getSerDe(String fieldNames, String fieldTypes, String order)
+  public static SerDe getSerDe(String fieldNames, String fieldTypes, String order, String nullPosition)
       throws Throwable {
     Properties schema = new Properties();
     schema.setProperty(serdeConstants.LIST_COLUMNS, fieldNames);
     schema.setProperty(serdeConstants.LIST_COLUMN_TYPES, fieldTypes);
     schema.setProperty(serdeConstants.SERIALIZATION_SORT_ORDER, order);
+    schema.setProperty(serdeConstants.SERIALIZATION_NULL_POSITION, nullPosition);
 
     BinarySortableSerDe serde = new BinarySortableSerDe();
     SerDeUtils.initializeSerDe(serde, new Configuration(), schema, null);
@@ -172,11 +169,14 @@ public void testBinarySortableSerDe() throws Throwable {
 
       String order;
       order = StringUtils.leftPad("", MyTestClass.fieldCount, '+');
+      String nullPosition;
+      nullPosition = StringUtils.leftPad("", MyTestClass.fieldCount, 'a');
       testBinarySortableSerDe(rows, rowOI, getSerDe(fieldNames, fieldTypes,
-          order), true);
+          order, nullPosition), true);
       order = StringUtils.leftPad("", MyTestClass.fieldCount, '-');
+      nullPosition = StringUtils.leftPad("", MyTestClass.fieldCount, 'z');
       testBinarySortableSerDe(rows, rowOI, getSerDe(fieldNames, fieldTypes,
-          order), false);
+          order, nullPosition), false);
 
       System.out.println("Test testTBinarySortableProtocol passed!");
     } catch (Throwable e) {
diff --git testutils/metastore/metastore-upgrade-test.sh testutils/metastore/metastore-upgrade-test.sh
index be29b15..3bbb8a5 100644
--- testutils/metastore/metastore-upgrade-test.sh
+++ testutils/metastore/metastore-upgrade-test.sh
@@ -119,15 +119,14 @@ if ! execute_test $HIVE_SCHEMA_BASE; then
 fi
 
 begin_upgrade_test="false"
-find $HMS_UPGRADE_DIR/$DB_SERVER/upgrade-* | sort -V | while read script
+while read script
 do
-  name=$(basename $script)
-  if [ $begin_upgrade_test = "true" ] || echo $name | grep "upgrade-$VERSION_BASE"; then
+  if [ $begin_upgrade_test = "true" ] || echo upgrade-$script | grep "upgrade-$VERSION_BASE"; then
     begin_upgrade_test="true"
-    if ! execute_test $script; then
-      echo "Error: Cannot execute SQL file: $script"
+    if ! execute_test $HMS_UPGRADE_DIR/$DB_SERVER/upgrade-$script.$DB_SERVER.sql; then
+      echo "Error: Cannot execute SQL file: $HMS_UPGRADE_DIR/$DB_SERVER/upgrade-$script.$DB_SERVER.sql"
     fi
   fi
-done
+done < $HMS_UPGRADE_DIR/$DB_SERVER/upgrade.order.$DB_SERVER
 
 log "Tests executed."
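To illustrate how the pieces above fit together, here is a minimal sketch (not part of the patch itself) that initializes BinarySortableSerDe with the new serialization.null.position property, mirroring TestBinarySortableSerDe.getSerDe(); the class name NullPositionExample and the single-column schema are made up for the illustration:

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;

public class NullPositionExample {
  public static void main(String[] args) throws Exception {
    Properties schema = new Properties();
    schema.setProperty(serdeConstants.LIST_COLUMNS, "c1");
    schema.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int");
    // One column, ascending sort order...
    schema.setProperty(serdeConstants.SERIALIZATION_SORT_ORDER, "+");
    // ...with NULLs positioned last ('z'); 'a' would place them first.
    schema.setProperty(serdeConstants.SERIALIZATION_NULL_POSITION, "z");

    BinarySortableSerDe serde = new BinarySortableSerDe();
    SerDeUtils.initializeSerDe(serde, new Configuration(), schema, null);

    // For ascending order with NULLs last, initialize() picks marker \1 for
    // NULL and \0 for non-NULL, so byte-by-byte comparison puts NULLs last.
    System.out.println(serde.getNullMarkers()[0]);    // prints 1
    System.out.println(serde.getNotNullMarkers()[0]); // prints 0
  }
}

At the SQL level, the same machinery backs the ORDER BY ... NULLS FIRST / NULLS LAST syntax visible in the updated q.out plans above, where ascending sorts default to NULLS FIRST and descending sorts to NULLS LAST.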