diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
index 9fb7550..c7def7a 100644
--- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
+++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
@@ -213,6 +213,35 @@ public void testPermFunc() throws Exception {
     stmt.execute("DROP TABLE " + tableName);
   }
 
+  @Test
+  public void testTempTable() throws Exception {
+    // Create temp table with current connection
+    String tempTableName = "tmp1";
+    stmt.execute("CREATE TEMPORARY TABLE " + tempTableName + " (key string, value string)");
+    stmt.execute("load data local inpath '"
+        + dataFilePath.toString() + "' into table " + tempTableName);
+
+    String resultVal = "val_238";
+    String queryStr = "SELECT * FROM " + tempTableName
+        + " where value = '" + resultVal + "'";
+    verifyResult(queryStr, resultVal, 2);
+
+    // A second connection should not be able to see the table
+    Connection conn2 = DriverManager.getConnection(miniHS2.getJdbcURL(dbName),
+        System.getProperty("user.name"), "bar");
+    Statement stmt2 = conn2.createStatement();
+    stmt2.execute("USE " + dbName);
+    boolean gotException = false;
+    try {
+      ResultSet res;
+      res = stmt2.executeQuery(queryStr);
+    } catch (SQLException err) {
+      // This is expected to fail.
+      gotException = true;
+    }
+    assertTrue("Querying the temp table from a second session should have failed", gotException);
+  }
+
   private void checkForNotExist(ResultSet res) throws Exception {
     int numRows = 0;
     while (res.next()) {
diff --git itests/qtest/testconfiguration.properties itests/qtest/testconfiguration.properties
index 6731561..81a1735 100644
--- itests/qtest/testconfiguration.properties
+++ itests/qtest/testconfiguration.properties
@@ -1,5 +1,5 @@
-minimr.query.files=stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q
+minimr.query.files=stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q,temp_table_external.q
 minimr.query.negative.files=cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q,file_with_header_footer_negative.q,udf_local_resource.q
 minitez.query.files=tez_fsstat.q,mapjoin_decimal.q,tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q,tez_union.q,bucket_map_join_tez1.q,bucket_map_join_tez2.q,tez_schema_evolution.q
-minitez.query.files.shared=cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q
+minitez.query.files.shared=cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q,temp_table.q
 beeline.positive.exclude=add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q
diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift
index cc802c6..d425d2b 100755
--- metastore/if/hive_metastore.thrift
+++ metastore/if/hive_metastore.thrift
@@ -230,6 +230,7 @@ struct Table {
   11: string viewExpandedText,         // expanded view text, null for non-view
   12: string tableType,                // table type enum, e.g. EXTERNAL_TABLE
   13: optional PrincipalPrivilegeSet privileges,
+  14: optional bool temporary=false
 }
 
 struct Partition {
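The regenerated bindings below are mechanical fallout of the one-line IDL change above. As a minimal sketch, this is what the new optional field looks like from the regenerated Java bean (assuming the rebuilt metastore API classes are on the classpath):

import org.apache.hadoop.hive.metastore.api.Table;

public class TempFlagDemo {
  public static void main(String[] args) {
    Table tbl = new Table();
    // The field defaults to false but reads as "unset" until assigned,
    // so readers and writers built before this change stay wire-compatible.
    System.out.println(tbl.isTemporary());    // false
    System.out.println(tbl.isSetTemporary()); // false
    tbl.setTemporary(true);
    System.out.println(tbl.isSetTemporary()); // true
  }
}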
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 8014f2a..a6cd09a 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -2455,8 +2455,8 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) {
   swap(a.__isset, b.__isset);
 }
 
-const char* Table::ascii_fingerprint = "68640B4B66B355CF317429AF70D2C260";
-const uint8_t Table::binary_fingerprint[16] = {0x68,0x64,0x0B,0x4B,0x66,0xB3,0x55,0xCF,0x31,0x74,0x29,0xAF,0x70,0xD2,0xC2,0x60};
+const char* Table::ascii_fingerprint = "29EFB2A5970EF572039E5D94CC78AA85";
+const uint8_t Table::binary_fingerprint[16] = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85};
 
 uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
 
@@ -2609,6 +2609,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 14:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->temporary);
+          this->__isset.temporary = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -2695,6 +2703,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
     xfer += this->privileges.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.temporary) {
+    xfer += oprot->writeFieldBegin("temporary", ::apache::thrift::protocol::T_BOOL, 14);
+    xfer += oprot->writeBool(this->temporary);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -2715,6 +2728,7 @@ void swap(Table &a, Table &b) {
   swap(a.viewExpandedText, b.viewExpandedText);
   swap(a.tableType, b.tableType);
   swap(a.privileges, b.privileges);
+  swap(a.temporary, b.temporary);
   swap(a.__isset, b.__isset);
 }
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 413256f..a0f208a 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -1332,7 +1332,7 @@ class StorageDescriptor {
 void swap(StorageDescriptor &a, StorageDescriptor &b);
 
 typedef struct _Table__isset {
-  _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false) {}
+  _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {}
   bool tableName;
   bool dbName;
   bool owner;
@@ -1346,15 +1346,16 @@ typedef struct _Table__isset {
   bool viewExpandedText;
   bool tableType;
   bool privileges;
+  bool temporary;
 } _Table__isset;
 
 class Table {
  public:
 
-  static const char* ascii_fingerprint; // = "68640B4B66B355CF317429AF70D2C260";
-  static const uint8_t binary_fingerprint[16]; // = {0x68,0x64,0x0B,0x4B,0x66,0xB3,0x55,0xCF,0x31,0x74,0x29,0xAF,0x70,0xD2,0xC2,0x60};
+  static const char* ascii_fingerprint; // = "29EFB2A5970EF572039E5D94CC78AA85";
+  static const uint8_t binary_fingerprint[16]; // = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85};
 
-  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType() {
+  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) {
   }
 
   virtual ~Table() throw() {}
@@ -1372,6 +1373,7 @@ class Table {
   std::string viewExpandedText;
   std::string tableType;
   PrincipalPrivilegeSet privileges;
+  bool temporary;
 
   _Table__isset __isset;
 
@@ -1428,6 +1430,11 @@ class Table {
     __isset.privileges = true;
   }
 
+  void __set_temporary(const bool val) {
+    temporary = val;
+    __isset.temporary = true;
+  }
+
   bool operator == (const Table & rhs) const
   {
     if (!(tableName == rhs.tableName))
@@ -1458,6 +1465,10 @@ class Table {
       return false;
     else if (__isset.privileges && !(privileges == rhs.privileges))
       return false;
+    if (__isset.temporary != rhs.__isset.temporary)
+      return false;
+    else if (__isset.temporary && !(temporary == rhs.temporary))
+      return false;
     return true;
   }
   bool operator != (const Table &rhs) const {
diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 1e7fca3..229a819 100644
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -47,6 +47,7 @@
   private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", org.apache.thrift.protocol.TType.STRING, (short)11);
   private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12);
   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13);
+  private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -67,6 +68,7 @@
   private String viewExpandedText; // required
   private String tableType; // required
   private PrincipalPrivilegeSet privileges; // optional
+  private boolean temporary; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -82,7 +84,8 @@
     VIEW_ORIGINAL_TEXT((short)10, "viewOriginalText"),
     VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"),
     TABLE_TYPE((short)12, "tableType"),
-    PRIVILEGES((short)13, "privileges");
+    PRIVILEGES((short)13, "privileges"),
+    TEMPORARY((short)14, "temporary");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -123,6 +126,8 @@ public static _Fields findByThriftId(int fieldId) {
           return TABLE_TYPE;
         case 13: // PRIVILEGES
           return PRIVILEGES;
+        case 14: // TEMPORARY
+          return TEMPORARY;
         default:
           return null;
       }
@@ -166,8 +171,9 @@ public String getFieldName() {
   private static final int __CREATETIME_ISSET_ID = 0;
   private static final int __LASTACCESSTIME_ISSET_ID = 1;
   private static final int __RETENTION_ISSET_ID = 2;
+  private static final int __TEMPORARY_ISSET_ID = 3;
   private byte __isset_bitfield = 0;
-  private _Fields optionals[] = {_Fields.PRIVILEGES};
+  private _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -200,11 +206,15 @@ public String getFieldName() {
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL,
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+    tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
   }
 
   public Table() {
+    this.temporary = false;
+
   }
 
   public Table(
@@ -293,6 +303,7 @@ public Table(Table other) {
     if (other.isSetPrivileges()) {
       this.privileges = new PrincipalPrivilegeSet(other.privileges);
     }
+    this.temporary = other.temporary;
   }
 
   public Table deepCopy() {
@@ -317,6 +328,8 @@ public void clear() {
     this.viewExpandedText = null;
     this.tableType = null;
     this.privileges = null;
+    this.temporary = false;
+
   }
 
   public String getTableName() {
@@ -641,6 +654,28 @@ public void setPrivilegesIsSet(boolean value) {
     }
   }
 
+  public boolean isTemporary() {
+    return this.temporary;
+  }
+
+  public void setTemporary(boolean temporary) {
+    this.temporary = temporary;
+    setTemporaryIsSet(true);
+  }
+
+  public void unsetTemporary() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TEMPORARY_ISSET_ID);
+  }
+
+  /** Returns true if field temporary is set (has been assigned a value) and false otherwise */
+  public boolean isSetTemporary() {
+    return EncodingUtils.testBit(__isset_bitfield, __TEMPORARY_ISSET_ID);
+  }
+
+  public void setTemporaryIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case TABLE_NAME:
@@ -747,6 +782,14 @@ public void setFieldValue(_Fields field, Object value) {
       }
       break;
 
+    case TEMPORARY:
+      if (value == null) {
+        unsetTemporary();
+      } else {
+        setTemporary((Boolean)value);
+      }
+      break;
+
     }
   }
 
@@ -791,6 +834,9 @@ public Object getFieldValue(_Fields field) {
     case PRIVILEGES:
       return getPrivileges();
 
+    case TEMPORARY:
+      return Boolean.valueOf(isTemporary());
+
     }
     throw new IllegalStateException();
   }
@@ -828,6 +874,8 @@ public boolean isSet(_Fields field) {
       return isSetTableType();
     case PRIVILEGES:
       return isSetPrivileges();
+    case TEMPORARY:
+      return isSetTemporary();
     }
     throw new IllegalStateException();
   }
@@ -962,6 +1010,15 @@ public boolean equals(Table that) {
         return false;
     }
 
+    boolean this_present_temporary = true && this.isSetTemporary();
+    boolean that_present_temporary = true && that.isSetTemporary();
+    if (this_present_temporary || that_present_temporary) {
+      if (!(this_present_temporary && that_present_temporary))
+        return false;
+      if (this.temporary != that.temporary)
+        return false;
+    }
+
     return true;
   }
 
@@ -1034,6 +1091,11 @@ public int hashCode() {
     if (present_privileges)
       builder.append(privileges);
 
+    boolean present_temporary = true && (isSetTemporary());
+    builder.append(present_temporary);
+    if (present_temporary)
+      builder.append(temporary);
+
     return builder.toHashCode();
   }
 
@@ -1175,6 +1237,16 @@ public int compareTo(Table other) {
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetTemporary()).compareTo(typedOther.isSetTemporary());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTemporary()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.temporary, typedOther.temporary);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -1288,6 +1360,12 @@ public String toString() {
       }
       first = false;
     }
+    if (isSetTemporary()) {
+      if (!first) sb.append(", ");
+      sb.append("temporary:");
+      sb.append(this.temporary);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -1468,6 +1546,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 14: // TEMPORARY
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.temporary = iprot.readBool();
+              struct.setTemporaryIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -1557,6 +1643,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetTemporary()) {
+        oprot.writeFieldBegin(TEMPORARY_FIELD_DESC);
+        oprot.writeBool(struct.temporary);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -1614,7 +1705,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw
       if (struct.isSetPrivileges()) {
         optionals.set(12);
       }
-      oprot.writeBitSet(optionals, 13);
+      if (struct.isSetTemporary()) {
+        optionals.set(13);
+      }
+      oprot.writeBitSet(optionals, 14);
       if (struct.isSetTableName()) {
         oprot.writeString(struct.tableName);
       }
@@ -1667,12 +1761,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw
       if (struct.isSetPrivileges()) {
         struct.privileges.write(oprot);
       }
+      if (struct.isSetTemporary()) {
+        oprot.writeBool(struct.temporary);
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(13);
+      BitSet incoming = iprot.readBitSet(14);
       if (incoming.get(0)) {
         struct.tableName = iprot.readString();
         struct.setTableNameIsSet(true);
@@ -1748,6 +1845,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws
         struct.privileges.read(iprot);
         struct.setPrivilegesIsSet(true);
       }
+      if (incoming.get(13)) {
+        struct.temporary = iprot.readBool();
+        struct.setTemporaryIsSet(true);
+      }
     }
   }
 
diff --git metastore/src/gen/thrift/gen-php/metastore/Types.php metastore/src/gen/thrift/gen-php/metastore/Types.php
index 46f6a04..3db3ded 100644
--- metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -3162,6 +3162,7 @@ class Table {
   public $viewExpandedText = null;
   public $tableType = null;
   public $privileges = null;
+  public $temporary = false;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -3233,6 +3234,10 @@ class Table {
           'type' => TType::STRUCT,
           'class' => '\metastore\PrincipalPrivilegeSet',
           ),
+        14 => array(
+          'var' => 'temporary',
+          'type' => TType::BOOL,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -3275,6 +3280,9 @@ class Table {
       if (isset($vals['privileges'])) {
         $this->privileges = $vals['privileges'];
       }
+      if (isset($vals['temporary'])) {
+        $this->temporary = $vals['temporary'];
+      }
     }
   }
 
@@ -3414,6 +3422,13 @@ class Table {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 14:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->temporary);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -3523,6 +3538,11 @@ class Table {
       $xfer += $this->privileges->write($output);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->temporary !== null) {
+      $xfer += $output->writeFieldBegin('temporary', TType::BOOL, 14);
+      $xfer += $output->writeBool($this->temporary);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index b3eeb89..43a498a 100644
--- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -2153,6 +2153,7 @@ class Table:
    - viewExpandedText
    - tableType
    - privileges
+   - temporary
   """
 
   thrift_spec = (
@@ -2170,9 +2171,10 @@ class Table:
     (11, TType.STRING, 'viewExpandedText', None, None, ), # 11
     (12, TType.STRING, 'tableType', None, None, ), # 12
     (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13
+    (14, TType.BOOL, 'temporary', None, False, ), # 14
   )
 
-  def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None,):
+  def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],):
     self.tableName = tableName
     self.dbName = dbName
     self.owner = owner
@@ -2186,6 +2188,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las
     self.viewExpandedText = viewExpandedText
     self.tableType = tableType
     self.privileges = privileges
+    self.temporary = temporary
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -2275,6 +2278,11 @@ def read(self, iprot):
           self.privileges.read(iprot)
         else:
           iprot.skip(ftype)
+      elif fid == 14:
+        if ftype == TType.BOOL:
+          self.temporary = iprot.readBool();
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -2344,6 +2352,10 @@ def write(self, oprot):
       oprot.writeFieldBegin('privileges', TType.STRUCT, 13)
       self.privileges.write(oprot)
       oprot.writeFieldEnd()
+    if self.temporary is not None:
+      oprot.writeFieldBegin('temporary', TType.BOOL, 14)
+      oprot.writeBool(self.temporary)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
diff --git metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 757461f..feb99db 100644
--- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -531,6 +531,7 @@ class Table
   VIEWEXPANDEDTEXT = 11
   TABLETYPE = 12
   PRIVILEGES = 13
+  TEMPORARY = 14
 
   FIELDS = {
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
@@ -545,7 +546,8 @@ class Table
     VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'},
     VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'},
     TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'},
-    PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}
+    PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
+    TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}
   }
 
   def struct_fields; FIELDS; end
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 664dccd..a6fb71d 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -139,7 +139,7 @@
   private boolean isConnected = false;
   private URI metastoreUris[];
   private final HiveMetaHookLoader hookLoader;
-  private final HiveConf conf;
+  protected final HiveConf conf;
   private String tokenStrForm;
   private final boolean localMetaStore;
 
@@ -147,7 +147,7 @@
   private int retries = 5;
   private int retryDelaySeconds = 0;
 
-  static final private Log LOG = LogFactory.getLog("hive.metastore");
+  static final protected Log LOG = LogFactory.getLog("hive.metastore");
 
   public HiveMetaStoreClient(HiveConf conf) throws MetaException {
@@ -555,7 +555,8 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws Already
     }
     boolean success = false;
     try {
-      client.create_table_with_environment_context(tbl, envContext);
+      // Subclasses can override this step (for example, for temporary tables)
+      create_table_with_environment_context(tbl, envContext);
       if (hook != null) {
         hook.commitCreateTable(tbl);
       }
@@ -617,7 +618,8 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD
     List<String> tableList = getAllTables(name);
     for (String table : tableList) {
       try {
-        dropTable(name, table, deleteData, false);
+        // Subclasses can override this step (for example, for temporary tables)
+        dropTable(name, table, deleteData, false);
       } catch (UnsupportedOperationException e) {
         // Ignore Index tables, those will be dropped with parent tables
       }
@@ -771,7 +773,7 @@ public void dropTable(String dbname, String name, boolean deleteData,
     }
     boolean success = false;
     try {
-      client.drop_table_with_environment_context(dbname, name, deleteData, envContext);
+      drop_table_with_environment_context(dbname, name, deleteData, envContext);
       if (hook != null) {
        hook.commitDropTable(tbl, deleteData);
       }
@@ -1342,7 +1344,7 @@ private Database deepCopy(Database database) {
     return copy;
   }
 
-  private Table deepCopy(Table table) {
+  protected Table deepCopy(Table table) {
     Table copy = null;
     if (table != null) {
       copy = new Table(table);
@@ -1727,4 +1729,15 @@ public Function getFunction(String dbName, String funcName)
     return client.get_functions(dbName, pattern);
   }
 
+  protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext)
+      throws AlreadyExistsException, InvalidObjectException,
+      MetaException, NoSuchObjectException, TException {
+    client.create_table_with_environment_context(tbl, envContext);
+  }
+
+  protected void drop_table_with_environment_context(String dbname, String name,
+      boolean deleteData, EnvironmentContext envContext) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    client.drop_table_with_environment_context(dbname, name, deleteData, envContext);
+  }
 }
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 9e8d912..82b871b 100755
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -102,7 +102,7 @@ private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
   /**
    * Helper functions to convert IOException to MetaException
    */
-  public FileSystem getFs(Path f) throws MetaException {
+  public static FileSystem getFs(Path f, Configuration conf) throws MetaException {
     try {
       return f.getFileSystem(conf);
     } catch (IOException e) {
@@ -111,6 +111,10 @@ public FileSystem getFs(Path f) throws MetaException {
     return null;
   }
 
+  public FileSystem getFs(Path f) throws MetaException {
+    return getFs(f, conf);
+  }
+
   public static void closeFs(FileSystem fs) throws MetaException {
     try {
       if (fs != null) {
@@ -135,12 +139,16 @@ public static void closeFs(FileSystem fs) throws MetaException {
    *          Path to be canonicalized
    * @return Path with canonical scheme and authority
    */
-  public Path getDnsPath(Path path) throws MetaException {
-    FileSystem fs = getFs(path);
+  public static Path getDnsPath(Path path, Configuration conf) throws MetaException {
+    FileSystem fs = getFs(path, conf);
     return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path
         .toUri().getPath()));
   }
 
+  public Path getDnsPath(Path path) throws MetaException {
+    return getDnsPath(path, conf);
+  }
+
   /**
    * Resolve the configured warehouse root dir with respect to the configuration
    * This involves opening the FileSystem corresponding to the warehouse root
@@ -174,7 +182,6 @@ public Path getDefaultDatabasePath(String dbName) throws MetaException {
     return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
   }
 
-
   public Path getTablePath(Database db, String tableName) throws MetaException {
     return getDnsPath(new Path(getDatabasePath(db), tableName.toLowerCase()));
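Making getFs and getDnsPath static lets code qualify a path against its FileSystem without constructing a Warehouse (and thus without any metastore wiring); SemanticAnalyzer relies on this further down when it fixes a temp table's location. A hedged sketch of the new entry point (the path literal is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class DnsPathDemo {
  public static void main(String[] args) throws MetaException {
    Configuration conf = new Configuration();
    // Rewrites the path with the canonical scheme/authority of its FileSystem.
    Path qualified = Warehouse.getDnsPath(new Path("/tmp/scratch"), conf);
    System.out.println(qualified);
  }
}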
diff --git ql/src/java/org/apache/hadoop/hive/ql/Context.java ql/src/java/org/apache/hadoop/hive/ql/Context.java
index abc4290..4daefa7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -73,13 +73,13 @@
   private final Map<String, ContentSummary> pathToCS = new ConcurrentHashMap<String, ContentSummary>();
 
   // scratch path to use for all non-local (ie. hdfs) file system tmp folders
-  private final Path nonLocalScratchPath;
+  private Path nonLocalScratchPath;
 
   // scratch directory to use for local file system tmp folders
-  private final String localScratchDir;
+  private String localScratchDir;
 
   // the permission to scratch directory (local and hdfs)
-  private final String scratchDirPermission;
+  private String scratchDirPermission;
 
   // Keeps track of scratch directories created for different scheme/authority
   private final Map<String, Path> fsScratchDirs = new HashMap<String, Path>();
@@ -124,12 +124,9 @@ public Context(Configuration conf, String executionId) {
 
     // local & non-local tmp location is configurable. however it is the same across
     // all external file systems
-    nonLocalScratchPath =
-        new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR),
-            executionId);
-    localScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR),
-        executionId).toUri().getPath();
-    scratchDirPermission= HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
+    nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId);
+    localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath();
+    scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 24f829f..0f17847 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2193,6 +2193,7 @@ private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveExc
   private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws HiveException {
     // get the create table statement for the table and populate the output
     final String EXTERNAL = "external";
+    final String TEMPORARY = "temporary";
     final String LIST_COLUMNS = "columns";
     final String TBL_COMMENT = "tbl_comment";
     final String LIST_PARTITIONS = "partitions";
@@ -2224,7 +2225,7 @@ private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws H
       return 0;
     }
 
-    createTab_str.append("CREATE <" + EXTERNAL + "> TABLE `");
+    createTab_str.append("CREATE <" + TEMPORARY + "><" + EXTERNAL + ">TABLE `");
     createTab_str.append(tableName + "`(\n");
     createTab_str.append("<" + LIST_COLUMNS + ">)\n");
     createTab_str.append("<" + TBL_COMMENT + ">\n");
@@ -2239,11 +2240,17 @@ private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws H
     createTab_str.append("<" + TBL_PROPERTIES + ">)\n");
     ST createTab_stmt = new ST(createTab_str.toString());
 
+    // For cases where the table is temporary
+    String tbl_temp = "";
+    if (tbl.isTemporary()) {
+      duplicateProps.add("TEMPORARY");
+      tbl_temp = "TEMPORARY ";
+    }
     // For cases where the table is external
     String tbl_external = "";
     if (tbl.getTableType() == TableType.EXTERNAL_TABLE) {
       duplicateProps.add("EXTERNAL");
-      tbl_external = "EXTERNAL";
+      tbl_external = "EXTERNAL ";
     }
 
     // Columns
@@ -2399,6 +2406,7 @@ else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
       tbl_properties += StringUtils.join(realProps, ", \n");
     }
 
+    createTab_stmt.add(TEMPORARY, tbl_temp);
     createTab_stmt.add(EXTERNAL, tbl_external);
     createTab_stmt.add(LIST_COLUMNS, tbl_columns);
     createTab_stmt.add(TBL_COMMENT, tbl_comment);
@@ -4242,6 +4250,8 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
       tbl.setSkewedColValues(crtTbl.getSkewedColValues());
     }
 
+    tbl.getTTable().setTemporary(crtTbl.isTemporary());
+
     tbl.setStoredAsSubDirectories(crtTbl.isStoredAsSubDirectories());
 
     tbl.setInputFormatClass(crtTbl.getInputFormat());
@@ -4389,6 +4399,8 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveExce
       params.putAll(crtTbl.getTblProps());
     }
 
+    tbl.getTTable().setTemporary(crtTbl.isTemporary());
+
     if (crtTbl.isExternal()) {
       tbl.setProperty("EXTERNAL", "TRUE");
       tbl.setTableType(TableType.EXTERNAL_TABLE);
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 4d35176..669175b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -681,6 +681,10 @@ public void createIndex(String tableName, String indexName, String indexHandlerC
     if (baseTbl.getTableType() == TableType.VIRTUAL_VIEW.toString()) {
       throw new HiveException("tableName="+ tableName +" is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported.");
     }
+    if (baseTbl.isTemporary()) {
+      throw new HiveException("tableName=" + tableName
+          + " is a TEMPORARY TABLE. Index on TEMPORARY TABLE is not supported.");
+    }
 
     if (indexTblName == null) {
       indexTblName = MetaStoreUtils.getIndexTableName(dbName, tableName, indexName);
@@ -2487,7 +2491,7 @@ public HiveMetaHook getHook(
       }
     };
     return RetryingMetaStoreClient.getProxy(conf, hookLoader,
-        HiveMetaStoreClient.class.getName());
+        SessionHiveMetaStoreClient.class.getName());
   }
 
   /**
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
new file mode 100644
index 0000000..aafdfa9
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -0,0 +1,354 @@
+package org.apache.hadoop.hive.ql.metadata;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.thrift.TException;
+
+public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements IMetaStoreClient {
+
+  SessionHiveMetaStoreClient(HiveConf conf) throws MetaException {
+    super(conf);
+  }
+
+  SessionHiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) throws MetaException {
+    super(conf, hookLoader);
+  }
+
+  private Warehouse wh = null;
+
+  private Warehouse getWh() throws MetaException {
+    if (wh == null) {
+      wh = new Warehouse(conf);
+    }
+    return wh;
+  }
+
+  @Override
+  protected void create_table_with_environment_context(
+      org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext)
+      throws AlreadyExistsException, InvalidObjectException,
+      MetaException, NoSuchObjectException, TException {
+
+    if (tbl.isTemporary()) {
+      createTempTable(tbl, envContext);
+      return;
+    }
+    // non-temp tables should use underlying client.
+    super.create_table_with_environment_context(tbl, envContext);
+  }
+
+  @Override
+  protected void drop_table_with_environment_context(String dbname, String name,
+      boolean deleteData, EnvironmentContext envContext) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    // First try temp table
+    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name);
+    if (table != null) {
+      dropTempTable(table, deleteData, envContext);
+      return;
+    }
+
+    // Try underlying client
+    super.drop_table_with_environment_context(dbname, name, deleteData, envContext);
+  }
+
+  @Override
+  public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name) throws MetaException,
+      TException, NoSuchObjectException {
+    // First check temp tables
+    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name);
+    if (table != null) {
+      return deepCopy(table);  // Original method used deepCopy(), do the same here.
+    }
+
+    // Try underlying client
+    return super.getTable(dbname, name);
+  }
+
+  @Override
+  public List<String> getAllTables(String dbName) throws MetaException {
+    List<String> tableNames = super.getAllTables(dbName);
+
+    // May need to merge with list of temp tables
+    Map<String, Table> tables = getTempTablesForDatabase(dbName);
+    if (tables == null || tables.size() == 0) {
+      return tableNames;
+    }
+
+    // Get list of temp table names
+    Set<String> tempTableNames = tables.keySet();
+
+    // Merge and sort result
+    Set<String> allTableNames = new HashSet<String>(tableNames.size() + tempTableNames.size());
+    allTableNames.addAll(tableNames);
+    allTableNames.addAll(tempTableNames);
+    tableNames = new ArrayList<String>(allTableNames);
+    Collections.sort(tableNames);
+    return tableNames;
+  }
+
+  @Override
+  public List<String> getTables(String dbName, String tablePattern) throws MetaException {
+    List<String> tableNames = super.getTables(dbName, tablePattern);
+
+    // May need to merge with list of temp tables
+    dbName = dbName.toLowerCase();
+    tablePattern = tablePattern.toLowerCase();
+    Map<String, Table> tables = getTempTablesForDatabase(dbName);
+    if (tables == null || tables.size() == 0) {
+      return tableNames;
+    }
+    tablePattern = tablePattern.replaceAll("\\*", ".*");
+    Pattern pattern = Pattern.compile(tablePattern);
+    // Reuse a single Matcher across all temp table names
+    Matcher matcher = pattern.matcher("");
+    Set<String> combinedTableNames = new HashSet<String>();
+    for (String tableName : tables.keySet()) {
+      matcher.reset(tableName);
+      if (matcher.matches()) {
+        combinedTableNames.add(tableName);
+      }
+    }
+
+    // Combine/sort temp and normal table results
+    combinedTableNames.addAll(tableNames);
+    tableNames = new ArrayList<String>(combinedTableNames);
+    Collections.sort(tableNames);
+    return tableNames;
+  }
+
+  @Override
+  public List<org.apache.hadoop.hive.metastore.api.Table> getTableObjectsByName(String dbName,
+      List<String> tableNames)
+      throws MetaException, InvalidOperationException, UnknownDBException, TException {
+
+    dbName = dbName.toLowerCase();
+    if (SessionState.get().getTempTables().size() == 0) {
+      // No temp tables, just call underlying client
+      return super.getTableObjectsByName(dbName, tableNames);
+    }
+
+    List<org.apache.hadoop.hive.metastore.api.Table> tables =
+        new ArrayList<org.apache.hadoop.hive.metastore.api.Table>();
+    for (String tableName : tableNames) {
+      try {
+        org.apache.hadoop.hive.metastore.api.Table table = getTable(dbName, tableName);
+        if (table != null) {
+          tables.add(table);
+        }
+      } catch (NoSuchObjectException err) {
+        // Ignore error, just return the valid tables that are found.
+      }
+    }
+    return tables;
+  }
+
+  @Override
+  public boolean tableExists(String databaseName, String tableName) throws MetaException,
+      TException, UnknownDBException {
+    // First check temp tables
+    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(databaseName, tableName);
+    if (table != null) {
+      return true;
+    }
+
+    // Try underlying client
+    return super.tableExists(databaseName, tableName);
+  }
+
+  public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl,
+      EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+    // First try temp table
+    org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name);
+    if (old_tbl != null) {
+      alterTempTable(dbname, tbl_name, old_tbl, new_tbl, envContext);
+      return;
+    }
+
+    // Try underlying client
+    super.alter_table(dbname, tbl_name, new_tbl, envContext);
+  }
+
+  private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl,
+      EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException,
+      MetaException, NoSuchObjectException, TException {
+
+    SessionState ss = SessionState.get();
+    if (ss == null) {
+      throw new MetaException("No current SessionState, cannot create temporary table "
+          + tbl.getDbName() + "." + tbl.getTableName());
+    }
+
+    // We may not own the table object, create a copy
+    tbl = deepCopyAndLowerCaseTable(tbl);
+
+    String dbName = tbl.getDbName();
+    String tblName = tbl.getTableName();
+    Map<String, Table> tables = getTempTablesForDatabase(dbName);
+    if (tables != null && tables.containsKey(tblName)) {
+      throw new MetaException("Temporary table " + dbName + "." + tblName + " already exists");
+    }
+
+    // Create temp table directory
+    Warehouse wh = getWh();
+    Path tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+    if (tblPath != null) {
+      if (!wh.isDir(tblPath)) {
+        if (!wh.mkdirs(tblPath, true)) {
+          throw new MetaException(tblPath
+              + " is not a directory or unable to create one");
+        }
+      }
+    } else {
+      throw new MetaException("Temp table path not set for " + tbl.getTableName());
+    }
+
+    // Add temp table info to current session
+    Table tTable = new Table(tbl);
+    if (tables == null) {
+      tables = new HashMap<String, Table>();
+      ss.getTempTables().put(dbName, tables);
+    }
+    tables.put(tblName, tTable);
+  }
+
+  private org.apache.hadoop.hive.metastore.api.Table getTempTable(String dbName, String tableName) {
+    Map<String, Table> tables = getTempTablesForDatabase(dbName.toLowerCase());
+    if (tables != null) {
+      Table table = tables.get(tableName.toLowerCase());
+      if (table != null) {
+        return table.getTTable();
+      }
+    }
+    return null;
+  }
+
+  private void alterTempTable(String dbname, String tbl_name,
+      org.apache.hadoop.hive.metastore.api.Table oldt,
+      org.apache.hadoop.hive.metastore.api.Table newt,
+      EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+    Table newTable = new Table(deepCopyAndLowerCaseTable(newt));
+    dbname = dbname.toLowerCase();
+    tbl_name = tbl_name.toLowerCase();
+
+    // Disallow changing temp table location
+    if (!newt.getSd().getLocation().equals(oldt.getSd().getLocation())) {
+      throw new MetaException("Temp table location cannot be changed");
+    }
+
+    String newDbName = newTable.getDbName();
+    String newTableName = newTable.getTableName();
+    if (!newDbName.equals(oldt.getDbName()) || !newTableName.equals(oldt.getTableName())) {
+      // Table was renamed.
+
+      // Do not allow temp table rename if the new name already exists as a temp table
+      if (getTempTable(newDbName, newTableName) != null) {
+        throw new MetaException("Cannot rename temporary table to " + newTableName
+            + " - temporary table already exists with the same name");
+      }
+
+      // Remove old temp table entry, and add new entry to list of temp tables.
+      // Note that for temp tables there is no need to rename directories
+      Map<String, Table> tables = getTempTablesForDatabase(dbname);
+      if (tables == null || tables.remove(tbl_name) == null) {
+        throw new MetaException("Could not find temp table entry for " + dbname + "." + tbl_name);
+      }
+
+      tables = getTempTablesForDatabase(newDbName);
+      if (tables == null) {
+        tables = new HashMap<String, Table>();
+        SessionState.get().getTempTables().put(newDbName, tables);
+      }
+      tables.put(newTableName, newTable);
+    } else {
+      getTempTablesForDatabase(dbname).put(tbl_name, newTable);
+    }
+  }
+
+  private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boolean deleteData,
+      EnvironmentContext envContext) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+
+    String dbName = table.getDbName().toLowerCase();
+    String tableName = table.getTableName().toLowerCase();
+
+    // Determine the temp table path
+    Path tablePath = null;
+    String pathStr = table.getSd().getLocation();
+    if (pathStr != null) {
+      try {
+        tablePath = new Path(table.getSd().getLocation());
+        if (!getWh().isWritable(tablePath.getParent())) {
+          throw new MetaException("Table metadata not deleted since " + tablePath.getParent()
+              + " is not writable by " + conf.getUser());
+        }
+      } catch (IOException err) {
+        throw new MetaException(err.getMessage());
+      }
+    }
+
+    // Remove table entry from SessionState
+    Map<String, Table> tables = getTempTablesForDatabase(dbName);
+    if (tables == null || tables.remove(tableName) == null) {
+      throw new MetaException("Could not find temp table entry for " + dbName + "." + tableName);
+    }
+
+    // Delete table data
+    if (deleteData && !MetaStoreUtils.isExternalTable(table)) {
+      try {
+        getWh().deleteDir(tablePath, true);
+      } catch (Exception err) {
+        LOG.error("Failed to delete temp table directory: " + tablePath + " " + err.getMessage());
+        // Forgive error
+      }
+    }
+  }
+
+  private org.apache.hadoop.hive.metastore.api.Table deepCopyAndLowerCaseTable(
+      org.apache.hadoop.hive.metastore.api.Table tbl) {
+    org.apache.hadoop.hive.metastore.api.Table newCopy = deepCopy(tbl);
+    newCopy.setDbName(newCopy.getDbName().toLowerCase());
+    newCopy.setTableName(newCopy.getTableName().toLowerCase());
+    return newCopy;
+  }
+
+  private Map<String, Table> getTempTablesForDatabase(String dbName) {
+    SessionState ss = SessionState.get();
+    if (ss == null) {
+      LOG.debug("No current SessionState, skipping temp tables");
+      return null;
+    }
+    return ss.getTempTables().get(dbName);
+  }
+}
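Because the registry above lives in SessionState rather than in the metastore database, visibility is scoped to the owning session by construction. An illustrative toy (names hypothetical) of the dbName -> tableName two-level lookup the client performs before falling through to the real metastore:

import java.util.HashMap;
import java.util.Map;

public class TempTableRegistryDemo {
  public static void main(String[] args) {
    // One registry per session, keyed by database name, then by table name.
    Map<String, Map<String, String>> session1 = new HashMap<String, Map<String, String>>();
    Map<String, Map<String, String>> session2 = new HashMap<String, Map<String, String>>();

    Map<String, String> defaultDb = new HashMap<String, String>();
    defaultDb.put("tmp1", "temp table metadata");
    session1.put("default", defaultDb);

    // Resolves in the owning session...
    System.out.println(session1.get("default").get("tmp1")); // temp table metadata
    // ...while another session finds nothing and would fall through to the metastore.
    System.out.println(session2.get("default"));             // null
  }
}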
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 3df2690..563dbd1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -996,4 +996,8 @@ public String getCompleteName() {
       throw new RuntimeException("Cannot get path ", e);
     }
   }
+
+  public boolean isTemporary() {
+    return tTable.isTemporary();
+  }
 };
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
index 1270520..2d4ee13 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
@@ -475,6 +475,7 @@ public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws SemanticE
     originalTree = tree;
     boolean isPartitionStats = isPartitionLevelStats(tree);
     PartitionList partList = null;
+    checkIfTemporaryTable();
     checkForPartitionColumns(colNames,
         Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
     validateSpecifiedColumnNames(colNames);
@@ -525,6 +526,13 @@ private void checkForPartitionColumns(List<String> specifiedCols, List<String> p
     }
   }
 
+  private void checkIfTemporaryTable() throws SemanticException {
+    if (tbl.isTemporary()) {
+      throw new SemanticException(tbl.getTableName()
+          + " is a temporary table. Column statistics are not supported on temporary tables.");
+    }
+  }
+
   @Override
   public void analyze(ASTNode ast, Context origCtx) throws SemanticException {
     QB qb;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index f934ac4..5ac6452 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -804,7 +804,7 @@ databaseComment
 createTableStatement
 @init { pushMsg("create table statement", state); }
 @after { popMsg(state); }
-    : KW_CREATE (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName
+    : KW_CREATE (temp=KW_TEMPORARY)? (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName
       (  like=KW_LIKE likeName=tableName
          tableLocation?
          tablePropertiesPrefixed?
@@ -819,7 +819,7 @@ createTableStatement
          tablePropertiesPrefixed?
          (KW_AS selectStatementWithCTE)?
       )
-    -> ^(TOK_CREATETABLE $name $ext? ifNotExists?
+    -> ^(TOK_CREATETABLE $name $temp? $ext? ifNotExists?
          ^(TOK_LIKETABLE $likeName?)
          columnNameTypeList?
          tableComment?
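With the grammar change in place, both the plain and CTAS forms accept the new modifier. A sketch exercising the syntax over JDBC (endpoint and credentials are placeholders, and a HiveServer2 built with this patch is assumed):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateTempTableDemo {
  public static void main(String[] args) throws Exception {
    Connection conn = DriverManager.getConnection(
        "jdbc:hive2://localhost:10000/default", "user", "");
    Statement stmt = conn.createStatement();
    // (temp=KW_TEMPORARY)? now precedes (ext=KW_EXTERNAL)? in createTableStatement
    stmt.execute("CREATE TEMPORARY TABLE tmp1 (key string, value string)");
    // CTAS carries the flag through to CreateTableDesc as well
    stmt.execute("CREATE TEMPORARY TABLE tmp2 AS SELECT key, value FROM tmp1");
    stmt.close();
    conn.close();
  }
}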
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 71471f4..83b6304 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -92,6 +92,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
           table.getTableName(),
           false, // isExternal: set to false here, can be overwritten by the
                  // IMPORT stmt
+          table.isTemporary(),
           table.getSd().getCols(),
           table.getPartitionKeys(),
           table.getSd().getBucketCols(),
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 83d09c0..a759ea8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -30,6 +31,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
@@ -9986,6 +9988,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
     Map<String, String> tblProps = null;
     boolean ifNotExists = false;
     boolean isExt = false;
+    boolean isTemporary = false;
     ASTNode selectStmt = null;
     final int CREATE_TABLE = 0; // regular CREATE TABLE
     final int CTLT = 1; // CREATE TABLE LIKE ... (CTLT)
@@ -10022,6 +10025,9 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
       case HiveParser.KW_EXTERNAL:
         isExt = true;
         break;
+      case HiveParser.KW_TEMPORARY:
+        isTemporary = true;
+        break;
       case HiveParser.TOK_LIKETABLE:
         if (child.getChildCount() > 0) {
           likeTableName = getUnescapedName((ASTNode) child.getChild(0));
@@ -10145,6 +10151,27 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
     String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0];
     Database database = getDatabase(dbName);
     outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
+
+    if (isTemporary) {
+      if (partCols.size() > 0) {
+        throw new SemanticException("Partition columns are not supported on temporary tables");
+      }
+
+      if (location == null) {
+        // for temporary tables we set the location to something in the session's scratch dir
+        // it has the same life cycle as the tmp table
+        try {
+          // Generate a unique ID for temp table path.
+          // This path will be fixed for the life of the temp table.
+          Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
+          path = Warehouse.getDnsPath(path, conf);
+          location = path.toString();
+        } catch (MetaException err) {
+          throw new SemanticException(err);
+        }
+      }
+    }
+
     // Handle different types of CREATE TABLE command
     CreateTableDesc crtTblDesc = null;
     switch (command_type) {
@@ -10152,7 +10179,7 @@
     case CREATE_TABLE: // REGULAR CREATE TABLE DDL
       tblProps = addDefaultProperties(tblProps);
 
-      crtTblDesc = new CreateTableDesc(tableName, isExt, cols, partCols,
+      crtTblDesc = new CreateTableDesc(tableName, isExt, isTemporary, cols, partCols,
           bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
           rowFormatParams.fieldEscape, rowFormatParams.collItemDelim,
          rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim,
Partition columns are not supported on temporary tables"); + } + } + CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, isTemporary, storageFormat.inputFormat, storageFormat.outputFormat, location, shared.serde, shared.serdeProps, tblProps, ifNotExists, likeTableName); SessionState.get().setCommandType(HiveOperation.CREATETABLE); @@ -10196,7 +10230,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) tblProps = addDefaultProperties(tblProps); - crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, cols, partCols, + crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 2537b75..52a8096 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -77,11 +77,12 @@ List skewedColNames; List> skewedColValues; boolean isStoredAsSubDirectories = false; + boolean isTemporary = false; public CreateTableDesc() { } - public CreateTableDesc(String databaseName, String tableName, boolean isExternal, + public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -92,7 +93,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal Map tblProps, boolean ifNotExists, List skewedColNames, List> skewedColValues) { - this(tableName, isExternal, cols, partCols, + this(tableName, isExternal, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serName, storageHandler, serdeProps, @@ -101,7 +102,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal this.databaseName = databaseName; } - public CreateTableDesc(String tableName, boolean isExternal, + public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -113,6 +114,7 @@ public CreateTableDesc(String tableName, boolean isExternal, boolean ifNotExists, List skewedColNames, List> skewedColValues) { this.tableName = tableName; this.isExternal = isExternal; + this.isTemporary = isTemporary; this.bucketCols = new ArrayList(bucketCols); this.sortCols = new ArrayList(sortCols); this.collItemDelim = collItemDelim; @@ -535,4 +537,19 @@ public void setNullFormat(String nullFormat) { this.nullFormat = nullFormat; } + /** + * @return the isTemporary + */ + @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) + public boolean isTemporary() { + return isTemporary; + } + + /** + * @param isTemporary table is Temporary or not. 
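
NOTE (reviewer sketch, not part of the patch): the defaulting above means every temp table created without an explicit LOCATION gets a UUID-named directory under the session's temp-table space, so its storage shares the session's life cycle. Roughly (the example path is illustrative):

    // e.g. <hive.exec.scratchdir>/<session-id>/_tmp_space.db/<random-uuid>
    Path tmpSpace = SessionState.getTempTableSpace(conf);
    Path tblLocation = Warehouse.getDnsPath(
        new Path(tmpSpace, UUID.randomUUID().toString()), conf);
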
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 2537b75..52a8096 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -77,11 +77,12 @@
   List<String> skewedColNames;
   List<List<String>> skewedColValues;
   boolean isStoredAsSubDirectories = false;
+  boolean isTemporary = false;
 
   public CreateTableDesc() {
   }
 
-  public CreateTableDesc(String databaseName, String tableName, boolean isExternal,
+  public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary,
       List<FieldSchema> cols, List<FieldSchema> partCols,
       List<String> bucketCols, List<Order> sortCols, int numBuckets,
       String fieldDelim, String fieldEscape, String collItemDelim,
@@ -92,7 +93,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal
       Map<String, String> tblProps,
       boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues) {
 
-    this(tableName, isExternal, cols, partCols,
+    this(tableName, isExternal, isTemporary, cols, partCols,
         bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape,
         collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat,
         outputFormat, location, serName, storageHandler, serdeProps,
@@ -101,7 +102,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal
     this.databaseName = databaseName;
   }
 
-  public CreateTableDesc(String tableName, boolean isExternal,
+  public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary,
       List<FieldSchema> cols, List<FieldSchema> partCols,
       List<String> bucketCols, List<Order> sortCols, int numBuckets,
       String fieldDelim, String fieldEscape, String collItemDelim,
@@ -113,6 +114,7 @@ public CreateTableDesc(String tableName, boolean isExternal,
       boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues) {
     this.tableName = tableName;
     this.isExternal = isExternal;
+    this.isTemporary = isTemporary;
     this.bucketCols = new ArrayList<String>(bucketCols);
     this.sortCols = new ArrayList<Order>(sortCols);
     this.collItemDelim = collItemDelim;
@@ -535,4 +537,19 @@ public void setNullFormat(String nullFormat) {
     this.nullFormat = nullFormat;
   }
 
+  /**
+   * @return the isTemporary
+   */
+  @Explain(displayName = "isTemporary", displayOnlyOnTrue = true)
+  public boolean isTemporary() {
+    return isTemporary;
+  }
+
+  /**
+   * @param isTemporary whether the table is temporary
+   */
+  public void setTemporary(boolean isTemporary) {
+    this.isTemporary = isTemporary;
+  }
+
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
index cb5d64c..c9ce30f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
@@ -38,16 +38,18 @@
   Map<String, String> tblProps;
   boolean ifNotExists;
   String likeTableName;
+  boolean isTemporary = false;
 
   public CreateTableLikeDesc() {
   }
 
-  public CreateTableLikeDesc(String tableName, boolean isExternal,
+  public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary,
       String defaultInputFormat, String defaultOutputFormat, String location,
       String defaultSerName, Map<String, String> defaultSerdeProps, Map<String, String> tblProps,
       boolean ifNotExists, String likeTableName) {
     this.tableName = tableName;
     this.isExternal = isExternal;
+    this.isTemporary = isTemporary;
     this.defaultInputFormat=defaultInputFormat;
     this.defaultOutputFormat=defaultOutputFormat;
     this.defaultSerName=defaultSerName;
@@ -168,4 +170,20 @@ public void setLikeTableName(String likeTableName) {
   public void setTblProps(Map<String, String> tblProps) {
     this.tblProps = tblProps;
   }
+
+  /**
+   * @return the isTemporary
+   */
+  @Explain(displayName = "isTemporary", displayOnlyOnTrue = true)
+  public boolean isTemporary() {
+    return isTemporary;
+  }
+
+  /**
+   * @param isTemporary whether the table is temporary
+   */
+  public void setTemporary(boolean isTemporary) {
+    this.isTemporary = isTemporary;
+  }
+
 }
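
NOTE (reviewer sketch, not part of the patch): displayOnlyOnTrue on the new getters is what keeps EXPLAIN output unchanged for regular tables while adding an "isTemporary: true" line for temp tables (visible in temp_table.q.out below). The flag round-trips through plain bean accessors:

    CreateTableDesc desc = new CreateTableDesc();
    desc.setTemporary(true);
    // EXPLAIN prints "isTemporary: true" for this desc and nothing when the
    // flag is false, because @Explain(displayOnlyOnTrue = true) suppresses
    // the default case.
    assert desc.isTemporary();
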
diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 2143d0c..ec89dc3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -55,6 +56,7 @@
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
@@ -75,6 +77,12 @@ public class SessionState {
 
   private static final Log LOG = LogFactory.getLog(SessionState.class);
 
+  public static final String TMP_PREFIX = "_tmp_space.db";
+  public static final String LOCAL_SESSION_PATH_KEY = "_hive.local.session.path";
+  public static final String HDFS_SESSION_PATH_KEY = "_hive.hdfs.session.path";
+  public static final String TMP_TABLE_SPACE_KEY = "_hive.tmp_table_space";
+  private Map<String, Map<String, Table>> tempTables = new HashMap<String, Map<String, Table>>();
+
   protected ClassLoader parentLoader;
 
   /**
@@ -172,6 +180,15 @@
   private final String userName;
 
+  // scratch path to use for all non-local (i.e. hdfs) file system tmp folders
+  private Path hdfsSessionPath;
+
+  // sub dir of the hdfs session path, used to keep tmp tables
+  private Path hdfsTmpTableSpace;
+
+  // scratch directory to use for local file system tmp folders
+  private Path localSessionPath;
+
   /**
    * Get the lineage state stored in this session.
    *
@@ -335,6 +352,7 @@ public static SessionState start(SessionState startSs) {
       Hive.get(new HiveConf(startSs.conf)).getMSC();
       ShimLoader.getHadoopShims().getUGIForConf(startSs.conf);
       FileSystem.get(startSs.conf);
+      startSs.createSessionPaths(startSs.conf);
     } catch (Exception e) {
       // catch-all due to some exec time dependencies on session state
       // that would cause ClassNotFoundException otherwise
@@ -359,6 +377,83 @@ public static SessionState start(SessionState startSs) {
     return startSs;
   }
 
+  public static Path getLocalSessionPath(Configuration conf) {
+    SessionState ss = SessionState.get();
+    if (ss != null) {
+      return ss.localSessionPath;
+    } else {
+      return new Path(conf.get(LOCAL_SESSION_PATH_KEY));
+    }
+  }
+
+  public static Path getHDFSSessionPath(Configuration conf) {
+    SessionState ss = SessionState.get();
+    if (ss != null) {
+      return ss.hdfsSessionPath;
+    } else {
+      return new Path(conf.get(HDFS_SESSION_PATH_KEY));
+    }
+  }
+
+  public static Path getTempTableSpace(Configuration conf) {
+    SessionState ss = SessionState.get();
+    if (ss != null) {
+      return ss.getTempTableSpace();
+    } else {
+      return new Path(conf.get(TMP_TABLE_SPACE_KEY));
+    }
+  }
+
+  public Path getTempTableSpace() {
+    return this.hdfsTmpTableSpace;
+  }
+
+  private void dropSessionPaths(Configuration conf) throws IOException {
+    if (hdfsSessionPath != null) {
+      hdfsSessionPath.getFileSystem(conf).delete(hdfsSessionPath, true);
+    }
+    if (localSessionPath != null) {
+      localSessionPath.getFileSystem(conf).delete(localSessionPath, true);
+    }
+  }
+
+  private void createSessionPaths(Configuration conf) throws IOException {
+
+    String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
+    String sessionId = getSessionId();
+
+    // Both the local and the non-local tmp locations are configurable;
+    // the non-local location is shared across all external file systems.
+    hdfsSessionPath =
+        new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR),
+                 sessionId);
+    createPath(conf, hdfsSessionPath, scratchDirPermission);
+    conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
+
+    localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR),
+        sessionId);
+    createPath(conf, localSessionPath, scratchDirPermission);
+    conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
+
+    hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
+    createPath(conf, hdfsTmpTableSpace, scratchDirPermission);
+    conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
+  }
+
+  private void createPath(Configuration conf, Path p, String perm) throws IOException {
+    FileSystem fs = p.getFileSystem(conf);
+    p = new Path(fs.makeQualified(p).toString());
+    FsPermission fsPermission = new FsPermission(Short.parseShort(perm.trim(), 8));
+
+    if (!Utilities.createDirsWithPermission(conf, p, fsPermission)) {
+      throw new IOException("Cannot make directory: " + p.toString());
+    }
+
+    // best effort to clean up if we don't shut down properly
+    fs.deleteOnExit(p);
+  }
+
   /**
    * Setup authentication and authorization plugins for this session.
    */
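
NOTE (reviewer sketch, not part of the patch): createSessionPaths() gives each session one HDFS scratch directory, a local twin, and a _tmp_space.db subdirectory for temp-table data, and records all three in the conf so code running without a thread-local SessionState can still resolve them:

    // After SessionState.start(conf); the paths shown are illustrative.
    Path hdfs  = SessionState.getHDFSSessionPath(conf);   // <hive.exec.scratchdir>/<session-id>
    Path local = SessionState.getLocalSessionPath(conf);  // <hive.exec.local.scratchdir>/<session-id>
    Path tmp   = SessionState.getTempTableSpace(conf);    // <hdfs session path>/_tmp_space.db
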
@@ -922,6 +1017,8 @@ public void close() throws IOException {
     } finally {
       tezSessionState = null;
     }
+
+    dropSessionPaths(conf);
   }
 
   public AuthorizationMode getAuthorizationMode(){
@@ -991,4 +1088,12 @@ public void applyAuthorizationPolicy() throws HiveException {
     conf.set(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, Boolean.TRUE.toString());
   }
+
+  public Map<String, Map<String, Table>> getTempTables() {
+    return tempTables;
+  }
+
+  public void setTempTables(Map<String, Map<String, Table>> tempTables) {
+    this.tempTables = tempTables;
+  }
 }
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
index 43125f7..0835bde 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -156,6 +157,7 @@ public Edge answer(InvocationOnMock invocation) throws Throwable {
 
     conf = new JobConf();
     appLr = mock(LocalResource.class);
+    SessionState.start(new HiveConf());
     session = mock(TezSession.class);
     sessionState = mock(TezSessionState.class);
     when(sessionState.getSession()).thenReturn(session);
@@ -166,6 +168,7 @@ public Edge answer(InvocationOnMock invocation) throws Throwable {
 
   @After
   public void tearDown() throws Exception {
+    SessionState.get().close();
     utils = null;
     work = null;
     task = null;
diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
index 98c3cc3..d20d172 100644
--- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
+++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.junit.After;
@@ -51,6 +52,7 @@ public TestDbTxnManager() throws Exception {
     TxnDbUtil.setConfValues(conf);
+    SessionState.start(conf);
     ctx = new Context(conf);
     LogManager.getRootLogger().setLevel(Level.DEBUG);
     tearDown();
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java
index 91de8da..46f8052 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestMacroSemanticAnalyzer.java
@@ -42,10 +42,10 @@
   @Before
   public void setup() throws Exception {
     conf = new HiveConf();
+    SessionState.start(conf);
     context = new Context(conf);
     parseDriver = new ParseDriver();
     analyzer = new MacroSemanticAnalyzer(conf);
-    SessionState.start(conf);
   }
 
   private ASTNode parse(String command) throws Exception {
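
NOTE (reviewer sketch, not part of the patch): the test reorderings above all follow from the new lifecycle: SessionState.start() must run before a Context is created so the session scratch paths exist, and close() now deletes them. The pairing the patch expects looks like:

    HiveConf conf = new HiveConf();
    SessionState.start(conf);           // creates the per-session scratch and temp-table dirs
    try {
      Context ctx = new Context(conf);  // safe: session paths are already registered in conf
      // ... exercise analyzer/task code ...
    } finally {
      SessionState.get().close();       // drops the session paths created above
    }
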
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java
index 20d08b3..ab0d80e 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java
@@ -69,10 +69,10 @@ public void setup() throws Exception {
     db = Mockito.mock(Hive.class);
     table = new Table(DB, TABLE);
     partition = new Partition(table);
+    SessionState.start(conf);
     context = new Context(conf);
     parseDriver = new ParseDriver();
     analyzer = new DDLSemanticAnalyzer(conf, db);
-    SessionState.start(conf);
     Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
     Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
         .thenReturn(partition);
diff --git ql/src/test/queries/clientnegative/temp_table_authorize_create_tbl.q ql/src/test/queries/clientnegative/temp_table_authorize_create_tbl.q
new file mode 100644
index 0000000..bb75777
--- /dev/null
+++ ql/src/test/queries/clientnegative/temp_table_authorize_create_tbl.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set hive.security.authorization.enabled=true;
+set user.name=user33;
+create database db23221;
+use db23221;
+
+set user.name=user44;
+create temporary table twew221(a string);
diff --git ql/src/test/queries/clientnegative/temp_table_column_stats.q ql/src/test/queries/clientnegative/temp_table_column_stats.q
new file mode 100644
index 0000000..9b7aa4a
--- /dev/null
+++ ql/src/test/queries/clientnegative/temp_table_column_stats.q
@@ -0,0 +1,5 @@
+create temporary table tmp1 (c1 string);
+-- table-level stats should work
+analyze table tmp1 compute statistics;
+-- column stats should fail
+analyze table tmp1 compute statistics for columns;
diff --git ql/src/test/queries/clientnegative/temp_table_create_like_partitions.q ql/src/test/queries/clientnegative/temp_table_create_like_partitions.q
new file mode 100644
index 0000000..c504280
--- /dev/null
+++ ql/src/test/queries/clientnegative/temp_table_create_like_partitions.q
@@ -0,0 +1,3 @@
+create table tab1 (c1 string) partitioned by (p1 string);
+create temporary table tmp1 like tab1;
+show create table tmp1;
diff --git ql/src/test/queries/clientnegative/temp_table_index.q ql/src/test/queries/clientnegative/temp_table_index.q
new file mode 100644
index 0000000..91f45ce
--- /dev/null
+++ ql/src/test/queries/clientnegative/temp_table_index.q
@@ -0,0 +1,2 @@
+create temporary table tmp1 (c1 string);
+create index tmp1_idx on table tmp1 (c1) as 'COMPACT' with deferred rebuild;
diff --git ql/src/test/queries/clientnegative/temp_table_partitions.q ql/src/test/queries/clientnegative/temp_table_partitions.q
new file mode 100644
index 0000000..9592785
--- /dev/null
+++ ql/src/test/queries/clientnegative/temp_table_partitions.q
@@ -0,0 +1,2 @@
+-- temp tables with partition columns not currently supported
+create temporary table tmp1 (c1 string) partitioned by (p1 string);
diff --git ql/src/test/queries/clientnegative/temp_table_rename.q ql/src/test/queries/clientnegative/temp_table_rename.q
new file mode 100644
index 0000000..336ccb1
--- /dev/null
+++ ql/src/test/queries/clientnegative/temp_table_rename.q
@@ -0,0 +1,3 @@
+create temporary table tmp1 (c1 string);
+create temporary table tmp2 (d1 string);
+alter table tmp2 rename to tmp1;
diff --git ql/src/test/queries/clientpositive/show_create_table_temp_table.q ql/src/test/queries/clientpositive/show_create_table_temp_table.q
new file mode 100644
index 0000000..19c2c3e
--- /dev/null
+++ ql/src/test/queries/clientpositive/show_create_table_temp_table.q
@@ -0,0 +1,6 @@
+
+create database tmpdb;
+create temporary table tmpdb.tmp1 (c1 string, c2 string);
+show create table tmpdb.tmp1;
+drop table tmp1;
+drop database tmpdb;
diff --git ql/src/test/queries/clientpositive/stats19.q ql/src/test/queries/clientpositive/stats19.q
index 51514bd..b23b1c3 100644
--- ql/src/test/queries/clientpositive/stats19.q
+++ ql/src/test/queries/clientpositive/stats19.q
@@ -17,7 +17,7 @@ set hive.stats.key.prefix.max.length=0;
 -- The stats key should be hashed since the max length is too small
 insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src;
 
-set hive.stats.key.prefix.max.length=200;
+set hive.stats.key.prefix.max.length=250;
 
 -- The stats key should not be hashed since the max length is large enough
 insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src;
@@ -41,7 +41,7 @@ insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select
 desc formatted stats_part partition (ds='2010-04-08', hr = '13');
 
-set hive.stats.key.prefix.max.length=200;
+set hive.stats.key.prefix.max.length=250;
 
 -- The stats key should not be hashed since the max length is large enough
 insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src;
@@ -66,7 +66,7 @@ set hive.stats.key.prefix.max.length=0;
 -- The stats key should be hashed since the max length is too small
 insert overwrite table stats_part partition (ds='2010-04-08', hr) select key, value, '13' from src;
 
-set hive.stats.key.prefix.max.length=200;
+set hive.stats.key.prefix.max.length=250;
 
 -- The stats key should not be hashed since the max length is large enough
 insert overwrite table stats_part partition (ds='2010-04-08', hr) select key, value, '13' from src;
diff --git ql/src/test/queries/clientpositive/temp_table.q ql/src/test/queries/clientpositive/temp_table.q
new file mode 100644
index 0000000..9667070
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table.q
@@ -0,0 +1,43 @@
+EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0;
+CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0;
+
+EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1;
+CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1;
+
+DESCRIBE foo;
+DESCRIBE bar;
+
+explain select * from foo limit 10;
+select * from foo limit 10;
+
+explain select * from (select * from foo union all select * from bar) u order by key limit 10;
+select * from (select * from foo union all select * from bar) u order by key limit 10;
+
+CREATE TEMPORARY TABLE baz LIKE foo;
+
+INSERT OVERWRITE TABLE baz SELECT * from foo;
+
+CREATE TEMPORARY TABLE bay (key string, value string) STORED AS orc;
+select * from bay;
+
+INSERT OVERWRITE TABLE bay SELECT * FROM src ORDER BY key;
+
+select * from bay limit 10;
+
+SHOW TABLES;
+
+CREATE DATABASE two;
+
+USE two;
+
+SHOW TABLES;
+
+CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo;
+
+SHOW TABLES;
+
+use default;
+
+DROP DATABASE two CASCADE;
+
+DROP TABLE bay;
diff --git ql/src/test/queries/clientpositive/temp_table_external.q ql/src/test/queries/clientpositive/temp_table_external.q
new file mode 100644
index 0000000..f12a7b3
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_external.q
@@ -0,0 +1,13 @@
+
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/temp_table_external;
+dfs -copyFromLocal ../../data/files/in1.txt hdfs:///tmp/temp_table_external/;
+dfs -ls hdfs:///tmp/temp_table_external/;
+
+create temporary external table temp_table_external (c1 int, c2 int) location 'hdfs:///tmp/temp_table_external';
+select * from temp_table_external;
+
+-- Even after we drop the table, the data directory should still be there
+drop table temp_table_external;
+dfs -ls hdfs:///tmp/temp_table_external/;
+
+dfs -rmr hdfs:///tmp/temp_table_external;
diff --git ql/src/test/queries/clientpositive/temp_table_gb1.q ql/src/test/queries/clientpositive/temp_table_gb1.q
new file mode 100644
index 0000000..a7e40b7
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_gb1.q
@@ -0,0 +1,16 @@
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+set hive.map.aggr=false;
+set hive.groupby.skewindata=true;
+
+-- Taken from groupby2.q
+CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
+CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src;
+
+FROM src_temp
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1);
+
+SELECT dest_g2.* FROM dest_g2;
+
+DROP TABLE dest_g2;
+DROP TABLE src_temp;
diff --git ql/src/test/queries/clientpositive/temp_table_join1.q ql/src/test/queries/clientpositive/temp_table_join1.q
new file mode 100644
index 0000000..95d5ad9
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_join1.q
@@ -0,0 +1,30 @@
+
+CREATE TABLE src_nontemp AS SELECT * FROM src limit 10;
+CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10;
+
+-- Non temp table join
+EXPLAIN
+FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key)
+SELECT src1.key, src2.value;
+
+FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key)
+SELECT src1.key, src2.value;
+
+-- Non temp table join with temp table
+EXPLAIN
+FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key)
+SELECT src1.key, src2.value;
+
+FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key)
+SELECT src1.key, src2.value;
+
+-- temp table join with temp table
+EXPLAIN
+FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key)
+SELECT src1.key, src2.value;
+
+FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key)
+SELECT src1.key, src2.value;
+
+DROP TABLE src_nontemp;
+DROP TABLE src_temp;
diff --git ql/src/test/queries/clientpositive/temp_table_names.q ql/src/test/queries/clientpositive/temp_table_names.q
new file mode 100644
index 0000000..bac26d3
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_names.q
@@ -0,0 +1,13 @@
+
+-- Test temp tables with upper/lower case names
+create temporary table Default.Temp_Table_Names (C1 string, c2 string);
+
+show tables 'Temp_Table*';
+show tables in default 'temp_table_names';
+show tables in DEFAULT 'TEMP_TABLE_NAMES';
+
+select c1 from default.temp_table_names;
+select C1 from DEFAULT.TEMP_TABLE_NAMES;
+
+drop table Default.TEMP_TABLE_names;
+show tables 'temp_table_names';
diff --git ql/src/test/queries/clientpositive/temp_table_options1.q ql/src/test/queries/clientpositive/temp_table_options1.q
new file mode 100644
index 0000000..b336267
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_options1.q
@@ -0,0 +1,112 @@
+-- Delimiter test, taken from delimiter.q
+create temporary table impressions (imp string, msg string)
+row format delimited
+fields terminated by '\t'
+lines terminated by '\n'
+stored as textfile;
+LOAD DATA LOCAL INPATH '../../data/files/in7.txt' INTO TABLE impressions;
+
+select * from impressions;
+
+select imp,msg from impressions;
+
+drop table impressions;
+
+
+-- Try different SerDe formats, taken from date_serde.q
+
+--
+-- RegexSerDe
+--
+create temporary table date_serde_regex (
+  ORIGIN_CITY_NAME string,
+  DEST_CITY_NAME string,
+  FL_DATE date,
+  ARR_DELAY float,
+  FL_NUM int
+)
+row format serde 'org.apache.hadoop.hive.serde2.RegexSerDe'
+with serdeproperties (
+  "input.regex" = "([^]*)([^]*)([^]*)([^]*)([0-9]*)"
+)
+stored as textfile;
+
+load data local inpath '../../data/files/flights_tiny.txt.1' overwrite into table date_serde_regex;
+
+select * from date_serde_regex;
+select fl_date, count(*) from date_serde_regex group by fl_date;
+
+--
+-- LazyBinary
+--
+create temporary table date_serde_lb (
+  c1 date,
+  c2 int
+);
+alter table date_serde_lb set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe';
+
+insert overwrite table date_serde_lb
+  select fl_date, fl_num from date_serde_regex limit 1;
+
+select * from date_serde_lb;
+select c1, sum(c2) from date_serde_lb group by c1;
+
+--
+-- LazySimple
+--
+create temporary table date_serde_ls (
+  c1 date,
+  c2 int
+);
+alter table date_serde_ls set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
+
+insert overwrite table date_serde_ls
+  select c1, c2 from date_serde_lb limit 1;
+
+select * from date_serde_ls;
+select c1, sum(c2) from date_serde_ls group by c1;
+
+--
+-- Columnar
+--
+create temporary table date_serde_c (
+  c1 date,
+  c2 int
+) stored as rcfile;
+alter table date_serde_c set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+
+insert overwrite table date_serde_c
+  select c1, c2 from date_serde_ls limit 1;
+
+select * from date_serde_c;
+select c1, sum(c2) from date_serde_c group by c1;
+
+--
+-- LazyBinaryColumnar
+--
+create temporary table date_serde_lbc (
+  c1 date,
+  c2 int
+) stored as rcfile;
+alter table date_serde_lbc set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+
+insert overwrite table date_serde_lbc
+  select c1, c2 from date_serde_c limit 1;
+
+select * from date_serde_lbc;
+select c1, sum(c2) from date_serde_lbc group by c1;
+
+--
+-- ORC
+--
+create temporary table date_serde_orc (
+  c1 date,
+  c2 int
+) stored as orc;
+alter table date_serde_orc set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde';
+
+insert overwrite table date_serde_orc
+  select c1, c2 from date_serde_lbc limit 1;
+
+select * from date_serde_orc;
+select c1, sum(c2) from date_serde_orc group by c1;
diff --git ql/src/test/queries/clientpositive/temp_table_precedence.q ql/src/test/queries/clientpositive/temp_table_precedence.q
new file mode 100644
index 0000000..971af73
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_precedence.q
@@ -0,0 +1,38 @@
+
+create database ttp;
+
+-- Create non-temp tables
+create table ttp.tab1 (a1 string, a2 string);
+insert overwrite table ttp.tab1 select * from src where key = 5 limit 5;
+describe ttp.tab1;
+select * from ttp.tab1;
+create table ttp.tab2 (b1 string, b2 string);
+insert overwrite table ttp.tab2 select * from src where key = 2 limit 5;
+describe ttp.tab2;
+select * from ttp.tab2;
+
+-- Now create temp table with same name
+create temporary table ttp.tab1 (c1 int, c2 string);
+insert overwrite table ttp.tab1 select * from src where key = 0 limit 5;
+
+-- describe/select should now use temp table
+describe ttp.tab1;
+select * from ttp.tab1;
+
+-- rename the temp table, and now we can see our non-temp table again
+use ttp;
+alter table tab1 rename to tab2;
+use default;
+describe ttp.tab1;
+select * from ttp.tab1;
+
+-- now the non-temp tab2 should be hidden
+describe ttp.tab2;
+select * from ttp.tab2;
+
+-- drop the temp table, and now we should be able to see the non-temp tab2 again
+drop table ttp.tab2;
+describe ttp.tab2;
+select * from ttp.tab2;
+
+drop database ttp cascade;
diff --git ql/src/test/queries/clientpositive/temp_table_subquery1.q ql/src/test/queries/clientpositive/temp_table_subquery1.q
new file mode 100644
index 0000000..ecf53e9
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_subquery1.q
@@ -0,0 +1,29 @@
+
+create temporary table src_temp as select * from src;
+
+-- subquery exists
+select *
+from src_temp b
+where exists
+  (select a.key
+  from src_temp a
+  where b.value = a.value and a.key = b.key and a.value > 'val_9'
+  )
+;
+
+-- subquery in
+select *
+from src_temp
+where src_temp.key in (select key from src_temp s1 where s1.key > '9')
+;
+
+select b.key, min(b.value)
+from src_temp b
+group by b.key
+having b.key in ( select a.key
+                  from src_temp a
+                  where a.value > 'val_9' and a.value = min(b.value)
+                )
+;
+
+drop table src_temp;
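
NOTE (reviewer sketch, not part of the patch): temp_table_precedence.q relies on table resolution consulting the session's temp-table registry before the metastore, so a temp table shadows a permanent table of the same name until it is dropped or renamed. The assumed lookup order, written in terms of the SessionState accessors added above (the surrounding helper and metaStoreClient variable are hypothetical):

    Map<String, Table> dbTempTables = SessionState.get().getTempTables().get(dbName);
    if (dbTempTables != null && dbTempTables.containsKey(tableName)) {
      return dbTempTables.get(tableName);               // session-local temp table wins
    }
    return metaStoreClient.getTable(dbName, tableName); // otherwise fall back to the metastore
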
diff --git ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q
new file mode 100644
index 0000000..0ad80c5
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q
@@ -0,0 +1,70 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TEMPORARY TABLE part(
+    p_partkey INT,
+    p_name STRING,
+    p_mfgr STRING,
+    p_brand STRING,
+    p_type STRING,
+    p_size INT,
+    p_container STRING,
+    p_retailprice DOUBLE,
+    p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+drop table over10k;
+
+create temporary table over10k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    bo boolean,
+    s string,
+    ts timestamp,
+    dec decimal(4,2),
+    bin binary)
+  row format delimited
+  fields terminated by '|';
+
+load data local inpath '../../data/files/over10k' into table over10k;
+
+select p_mfgr, p_retailprice, p_size,
+round(sum(p_retailprice) over w1 , 2) = round(sum(lag(p_retailprice,1,0.0)) over w1 + last_value(p_retailprice) over w1 , 2),
+max(p_retailprice) over w1 - min(p_retailprice) over w1 = last_value(p_retailprice) over w1 - first_value(p_retailprice) over w1
+from part
+window w1 as (distribute by p_mfgr sort by p_retailprice)
+;
+select p_mfgr, p_retailprice, p_size,
+rank() over (distribute by p_mfgr sort by p_retailprice) as r,
+sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between unbounded preceding and current row) as s2,
+sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between unbounded preceding and current row) -5 as s1
+from part
+;
+
+select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100;
+select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100;
+select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100;
+select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100;
+
+select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mfgr) from part;
+
+select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and current row) from part;
+
+-- multi table insert test
+create table t1 (a1 int, b1 string);
+create table t2 (a1 int, b1 string);
+from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * ;
+select * from t1 limit 3;
+select * from t2 limit 3;
+
+select p_mfgr, p_retailprice, p_size,
+round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2)
+from part
+window w1 as (distribute by p_mfgr sort by p_retailprice)
+limit 11;
diff --git ql/src/test/results/clientnegative/temp_table_authorize_create_tbl.q.out ql/src/test/results/clientnegative/temp_table_authorize_create_tbl.q.out
new file mode 100644
index 0000000..ec75b1d
--- /dev/null
+++ ql/src/test/results/clientnegative/temp_table_authorize_create_tbl.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create database db23221
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database db23221
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: use db23221
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use db23221
+POSTHOOK: type: SWITCHDATABASE
+FAILED: HiveAccessControlException Permission denied. Principal [name=user44, type=USER] does not have following privileges on Object [type=DATABASE, name=db23221] : [OBJECT OWNERSHIP]
diff --git ql/src/test/results/clientnegative/temp_table_column_stats.q.out ql/src/test/results/clientnegative/temp_table_column_stats.q.out
new file mode 100644
index 0000000..4b0c0bc
--- /dev/null
+++ ql/src/test/results/clientnegative/temp_table_column_stats.q.out
@@ -0,0 +1,18 @@
+PREHOOK: query: create temporary table tmp1 (c1 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create temporary table tmp1 (c1 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp1
+PREHOOK: query: -- table-level stats should work
+analyze table tmp1 compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp1
+PREHOOK: Output: default@tmp1
+POSTHOOK: query: -- table-level stats should work
+analyze table tmp1 compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp1
+POSTHOOK: Output: default@tmp1
+FAILED: SemanticException tmp1 is a temporary table. Column statistics are not supported on temporary tables.
diff --git ql/src/test/results/clientnegative/temp_table_create_like_partitions.q.out ql/src/test/results/clientnegative/temp_table_create_like_partitions.q.out
new file mode 100644
index 0000000..d7b0e6d
--- /dev/null
+++ ql/src/test/results/clientnegative/temp_table_create_like_partitions.q.out
@@ -0,0 +1,8 @@
+PREHOOK: query: create table tab1 (c1 string) partitioned by (p1 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table tab1 (c1 string) partitioned by (p1 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab1
+FAILED: SemanticException Attempting to do CREATE TABLE LIKE on a table with partition columns. Partition columns are not supported on temporary tables
diff --git ql/src/test/results/clientnegative/temp_table_index.q.out ql/src/test/results/clientnegative/temp_table_index.q.out
new file mode 100644
index 0000000..8ec5c0a
--- /dev/null
+++ ql/src/test/results/clientnegative/temp_table_index.q.out
@@ -0,0 +1,10 @@
+PREHOOK: query: create temporary table tmp1 (c1 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create temporary table tmp1 (c1 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp1
+PREHOOK: query: create index tmp1_idx on table tmp1 (c1) as 'COMPACT' with deferred rebuild
+PREHOOK: type: CREATEINDEX
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: tableName=tmp1 is a TEMPORARY TABLE. Index on TEMPORARY TABLE is not supported.
diff --git ql/src/test/results/clientnegative/temp_table_partitions.q.out ql/src/test/results/clientnegative/temp_table_partitions.q.out
new file mode 100644
index 0000000..c213318
--- /dev/null
+++ ql/src/test/results/clientnegative/temp_table_partitions.q.out
@@ -0,0 +1 @@
+FAILED: SemanticException Partition columns are not supported on temporary tables
diff --git ql/src/test/results/clientnegative/temp_table_rename.q.out ql/src/test/results/clientnegative/temp_table_rename.q.out
new file mode 100644
index 0000000..c8d4861
--- /dev/null
+++ ql/src/test/results/clientnegative/temp_table_rename.q.out
@@ -0,0 +1,19 @@
+PREHOOK: query: create temporary table tmp1 (c1 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create temporary table tmp1 (c1 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp1
+PREHOOK: query: create temporary table tmp2 (d1 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create temporary table tmp2 (d1 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp2
+PREHOOK: query: alter table tmp2 rename to tmp1
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: default@tmp2
+PREHOOK: Output: default@tmp2
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table.
diff --git ql/src/test/results/clientpositive/nullformat.q.out ql/src/test/results/clientpositive/nullformat.q.out
index d311825..07dae64 100644
--- ql/src/test/results/clientpositive/nullformat.q.out
+++ ql/src/test/results/clientpositive/nullformat.q.out
@@ -76,7 +76,7 @@ PREHOOK: Input: default@null_tab1
 POSTHOOK: query: SHOW CREATE TABLE null_tab1
 POSTHOOK: type: SHOW_CREATETABLE
 POSTHOOK: Input: default@null_tab1
-CREATE  TABLE `null_tab1`(
+CREATE TABLE `null_tab1`(
   `a` string,
   `b` string)
 ROW FORMAT DELIMITED
diff --git ql/src/test/results/clientpositive/nullformatCTAS.q.out ql/src/test/results/clientpositive/nullformatCTAS.q.out
index cab23d5..c76c30b 100644
--- ql/src/test/results/clientpositive/nullformatCTAS.q.out
+++ ql/src/test/results/clientpositive/nullformatCTAS.q.out
@@ -155,7 +155,7 @@ PREHOOK: Input: default@null_tab3
 POSTHOOK: query: SHOW CREATE TABLE null_tab3
 POSTHOOK: type: SHOW_CREATETABLE
 POSTHOOK: Input: default@null_tab3
-CREATE  TABLE `null_tab3`(
+CREATE TABLE `null_tab3`(
   `a` string,
   `b` string)
 ROW FORMAT DELIMITED
diff --git ql/src/test/results/clientpositive/show_create_table_alter.q.out ql/src/test/results/clientpositive/show_create_table_alter.q.out
index 206f4f8..3cf552f 100644
--- ql/src/test/results/clientpositive/show_create_table_alter.q.out
+++ ql/src/test/results/clientpositive/show_create_table_alter.q.out
@@ -51,7 +51,7 @@ PREHOOK: Input: default@tmp_showcrt1
 POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1
 POSTHOOK: type: SHOW_CREATETABLE
 POSTHOOK: Input: default@tmp_showcrt1
-CREATE  TABLE `tmp_showcrt1`(
+CREATE TABLE `tmp_showcrt1`(
   `key` smallint,
   `value` float)
 COMMENT 'temporary table'
diff --git ql/src/test/results/clientpositive/show_create_table_db_table.q.out ql/src/test/results/clientpositive/show_create_table_db_table.q.out
index 528dd36..0119471 100644
--- ql/src/test/results/clientpositive/show_create_table_db_table.q.out
+++ ql/src/test/results/clientpositive/show_create_table_db_table.q.out
@@ -29,7 +29,7 @@ PREHOOK: Input: tmp_feng@tmp_showcrt
 POSTHOOK: query: SHOW CREATE TABLE tmp_feng.tmp_showcrt
 POSTHOOK: type: SHOW_CREATETABLE
 POSTHOOK: Input: tmp_feng@tmp_showcrt
-CREATE  TABLE `tmp_feng.tmp_showcrt`(
+CREATE TABLE `tmp_feng.tmp_showcrt`(
   `key` string,
   `value` int)
 ROW FORMAT SERDE
diff --git ql/src/test/results/clientpositive/show_create_table_delimited.q.out ql/src/test/results/clientpositive/show_create_table_delimited.q.out
index d4ffd53..07d08ac 100644
--- ql/src/test/results/clientpositive/show_create_table_delimited.q.out
+++ ql/src/test/results/clientpositive/show_create_table_delimited.q.out
@@ -23,7 +23,7 @@ PREHOOK: Input: default@tmp_showcrt1
 POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1
 POSTHOOK: type: SHOW_CREATETABLE
 POSTHOOK: Input: default@tmp_showcrt1
-CREATE  TABLE `tmp_showcrt1`(
+CREATE TABLE `tmp_showcrt1`(
   `key` int,
   `value` string,
   `newvalue` bigint)
diff --git ql/src/test/results/clientpositive/show_create_table_serde.q.out ql/src/test/results/clientpositive/show_create_table_serde.q.out
index a9e92b4..a7500ba 100644
--- ql/src/test/results/clientpositive/show_create_table_serde.q.out
+++ ql/src/test/results/clientpositive/show_create_table_serde.q.out
@@ -25,7 +25,7 @@ PREHOOK: Input: default@tmp_showcrt1
 POSTHOOK: query: SHOW CREATE TABLE tmp_showcrt1
 POSTHOOK: type: SHOW_CREATETABLE
 POSTHOOK: Input: default@tmp_showcrt1
-CREATE  TABLE `tmp_showcrt1`(
+CREATE TABLE `tmp_showcrt1`(
   `key` int,
   `value` string,
   `newvalue` bigint)
diff --git
ql/src/test/results/clientpositive/show_create_table_temp_table.q.out ql/src/test/results/clientpositive/show_create_table_temp_table.q.out new file mode 100644 index 0000000..6306d90 --- /dev/null +++ ql/src/test/results/clientpositive/show_create_table_temp_table.q.out @@ -0,0 +1,42 @@ +PREHOOK: query: create database tmpdb +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: create database tmpdb +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: create temporary table tmpdb.tmp1 (c1 string, c2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:tmpdb +POSTHOOK: query: create temporary table tmpdb.tmp1 (c1 string, c2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:tmpdb +POSTHOOK: Output: tmpdb@tmp1 +PREHOOK: query: show create table tmpdb.tmp1 +PREHOOK: type: SHOW_CREATETABLE +PREHOOK: Input: tmpdb@tmp1 +POSTHOOK: query: show create table tmpdb.tmp1 +POSTHOOK: type: SHOW_CREATETABLE +POSTHOOK: Input: tmpdb@tmp1 +CREATE TEMPORARY TABLE `tmpdb.tmp1`( + `c1` string, + `c2` string) +ROW FORMAT SERDE + 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' +STORED AS INPUTFORMAT + 'org.apache.hadoop.mapred.TextInputFormat' +OUTPUTFORMAT + 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' +LOCATION +#### A masked pattern was here #### +TBLPROPERTIES ( +) +PREHOOK: query: drop table tmp1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table tmp1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop database tmpdb +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:tmpdb +PREHOOK: Output: database:tmpdb +POSTHOOK: query: drop database tmpdb +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:tmpdb +POSTHOOK: Output: database:tmpdb diff --git ql/src/test/results/clientpositive/temp_table.q.out ql/src/test/results/clientpositive/temp_table.q.out new file mode 100644 index 0000000..fbb674f --- /dev/null +++ ql/src/test/results/clientpositive/temp_table.q.out @@ -0,0 +1,477 @@ +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-8 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-8 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 0) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move 
Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-8 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: foo + isTemporary: true + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@foo +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-8 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-8 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 1) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-8 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: bar + isTemporary: true + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: 
Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bar +PREHOOK: query: DESCRIBE foo +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@foo +POSTHOOK: query: DESCRIBE foo +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@foo +key string +value string +PREHOOK: query: DESCRIBE bar +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@bar +POSTHOOK: query: DESCRIBE bar +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@bar +key string +value string +PREHOOK: query: explain select * from foo limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from foo limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: foo + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select * from foo limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from foo limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +238 val_238 +86 val_86 +278 val_278 +98 val_98 +484 val_484 +150 val_150 +224 val_224 +66 val_66 +128 val_128 +146 val_146 +PREHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: bar + Statistics: Num rows: 253 Data size: 2703 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 253 Data size: 2703 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 
Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + TableScan + alias: foo + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: string) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bar +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bar +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@baz +PREHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +PREHOOK: Output: default@baz +POSTHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +POSTHOOK: Output: default@baz +POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TEMPORARY TABLE bay (key string, value string) STORED AS orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: CREATE TEMPORARY TABLE bay (key string, value string) STORED AS orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bay +PREHOOK: query: select * from bay +PREHOOK: type: QUERY +PREHOOK: Input: default@bay +#### A masked 
pattern was here #### +POSTHOOK: query: select * from bay +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bay +#### A masked pattern was here #### +PREHOOK: query: INSERT OVERWRITE TABLE bay SELECT * FROM src ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@bay +POSTHOOK: query: INSERT OVERWRITE TABLE bay SELECT * FROM src ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bay +POSTHOOK: Lineage: bay.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bay.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from bay limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bay +#### A masked pattern was here #### +POSTHOOK: query: select * from bay limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bay +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +alltypesorc +bar +bay +baz +foo +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: CREATE DATABASE two +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: CREATE DATABASE two +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE two +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE two +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@foo +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@foo +POSTHOOK: Output: two@foo +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +foo +PREHOOK: query: use default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE two CASCADE +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:two +PREHOOK: Output: database:two +PREHOOK: Output: two@foo +POSTHOOK: query: DROP DATABASE two CASCADE +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:two +POSTHOOK: Output: database:two +POSTHOOK: Output: two@foo +PREHOOK: query: DROP TABLE bay +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bay +PREHOOK: Output: default@bay +POSTHOOK: query: DROP TABLE bay +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bay +POSTHOOK: Output: default@bay diff --git ql/src/test/results/clientpositive/temp_table_external.q.out ql/src/test/results/clientpositive/temp_table_external.q.out new file mode 100644 index 0000000..9973bce --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_external.q.out @@ -0,0 +1,33 @@ +Found 1 items +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@temp_table_external +PREHOOK: query: select * from temp_table_external +PREHOOK: type: QUERY +PREHOOK: Input: default@temp_table_external +#### A masked pattern was here #### +POSTHOOK: query: select * 
from temp_table_external +POSTHOOK: type: QUERY +POSTHOOK: Input: default@temp_table_external +#### A masked pattern was here #### +NULL 35 +48 NULL +100 100 +PREHOOK: query: -- Even after we drop the table, the data directory should still be there +drop table temp_table_external +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@temp_table_external +PREHOOK: Output: default@temp_table_external +POSTHOOK: query: -- Even after we drop the table, the data directory should still be there +drop table temp_table_external +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@temp_table_external +POSTHOOK: Output: default@temp_table_external +Found 1 items +#### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/temp_table_gb1.q.out ql/src/test/results/clientpositive/temp_table_gb1.q.out new file mode 100644 index 0000000..f3032e0 --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_gb1.q.out @@ -0,0 +1,63 @@ +PREHOOK: query: -- Taken from groupby2.q +CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- Taken from groupby2.q +CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dest_g2 +PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_temp +PREHOOK: query: FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_temp +PREHOOK: Output: default@dest_g2 +POSTHOOK: query: FROM src_temp +INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_temp +POSTHOOK: Output: default@dest_g2 +POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), (src_temp)src_temp.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src_temp)src_temp.FieldSchema(name:key, type:string, comment:null), ] +PREHOOK: query: SELECT dest_g2.* FROM dest_g2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest_g2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest_g2 +#### A masked pattern was here #### +0 1 00.0 +1 71 116414.0 +2 69 225571.0 +3 62 332004.0 +4 74 452763.0 +5 6 5397.0 +6 5 6398.0 +7 6 7735.0 +8 8 8762.0 +9 7 91047.0 +PREHOOK: query: DROP TABLE dest_g2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dest_g2 +PREHOOK: Output: default@dest_g2 +POSTHOOK: query: DROP TABLE dest_g2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dest_g2 +POSTHOOK: Output: default@dest_g2 +PREHOOK: query: DROP TABLE src_temp +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_temp +PREHOOK: Output: default@src_temp +POSTHOOK: query: 
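temp_table_external.q.out exercises the EXTERNAL variant: dropping the table must leave the backing files in place, which is what the trailing "Found 1 items" listing asserts. The CREATE statement itself is masked in the output because it embeds a filesystem path; a plausible reconstruction, with an illustrative LOCATION and column types inferred from the SELECT output, would be:

CREATE TEMPORARY EXTERNAL TABLE temp_table_external (c1 int, c2 int)
  LOCATION '/tmp/temp_table_external';  -- illustrative path; masked in the test
SELECT * FROM temp_table_external;
DROP TABLE temp_table_external;         -- removes metadata only; the directory survives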
DROP TABLE src_temp +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_temp +POSTHOOK: Output: default@src_temp diff --git ql/src/test/results/clientpositive/temp_table_join1.q.out ql/src/test/results/clientpositive/temp_table_join1.q.out new file mode 100644 index 0000000..2aa244e --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_join1.q.out @@ -0,0 +1,296 @@ +PREHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_nontemp +PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_temp +PREHOOK: query: -- Non temp table join +EXPLAIN +FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +PREHOOK: type: QUERY +POSTHOOK: query: -- Non temp table join +EXPLAIN +FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src2 + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + TableScan + alias: src1 + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 {VALUE._col0} + outputColumnNames: _col0, _col5 + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col5 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src_nontemp +#### A masked pattern was 
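temp_table_gb1.q.out is groupby2.q with the scan source swapped for a temporary CTAS copy; the aggregation, lineage entries, and results are unchanged:

CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src;
FROM src_temp
INSERT OVERWRITE TABLE dest_g2
  SELECT substr(src_temp.key,1,1),
         count(DISTINCT substr(src_temp.value,5)),
         concat(substr(src_temp.key,1,1), sum(substr(src_temp.value,5)))
  GROUP BY substr(src_temp.key,1,1);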
here #### +POSTHOOK: query: FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_nontemp +#### A masked pattern was here #### +165 val_165 +238 val_238 +255 val_255 +27 val_27 +278 val_278 +311 val_311 +409 val_409 +484 val_484 +86 val_86 +98 val_98 +PREHOOK: query: -- Non temp table join with temp table +EXPLAIN +FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +PREHOOK: type: QUERY +POSTHOOK: query: -- Non temp table join with temp table +EXPLAIN +FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src2 + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + TableScan + alias: src1 + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 {VALUE._col0} + outputColumnNames: _col0, _col5 + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col5 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src_nontemp +PREHOOK: Input: default@src_temp +#### A masked pattern was here #### +POSTHOOK: query: FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_nontemp +POSTHOOK: Input: default@src_temp +#### A masked pattern was here #### +165 val_165 +238 val_238 +255 val_255 +27 val_27 +278 val_278 +311 val_311 +409 val_409 +484 val_484 +86 val_86 +98 val_98 +PREHOOK: query: -- temp table join with temp table +EXPLAIN +FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +PREHOOK: type: QUERY +POSTHOOK: query: -- temp table join with temp table +EXPLAIN +FROM 
src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src2 + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + TableScan + alias: src1 + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 {VALUE._col0} + outputColumnNames: _col0, _col5 + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col5 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src_temp +#### A masked pattern was here #### +POSTHOOK: query: FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) +SELECT src1.key, src2.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_temp +#### A masked pattern was here #### +165 val_165 +238 val_238 +255 val_255 +27 val_27 +278 val_278 +311 val_311 +409 val_409 +484 val_484 +86 val_86 +98 val_98 +PREHOOK: query: DROP TABLE src_nontemp +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_nontemp +PREHOOK: Output: default@src_nontemp +POSTHOOK: query: DROP TABLE src_nontemp +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_nontemp +POSTHOOK: Output: default@src_nontemp +PREHOOK: query: DROP TABLE src_temp +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_temp +PREHOOK: Output: default@src_temp +POSTHOOK: query: DROP TABLE src_temp +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_temp +POSTHOOK: Output: default@src_temp diff --git ql/src/test/results/clientpositive/temp_table_names.q.out ql/src/test/results/clientpositive/temp_table_names.q.out new file mode 100644 index 0000000..940684c --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_names.q.out @@ -0,0 +1,49 @@ +PREHOOK: query: -- Test temp tables with upper/lower case names +create temporary table Default.Temp_Table_Names (C1 
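The three EXPLAINs in temp_table_join1.q.out are intentionally identical: whether neither, one, or both sides of the join are temporary, the planner emits the same Stage-1 map-reduce join, and only the PREHOOK/POSTHOOK Input lines distinguish the cases:

EXPLAIN
FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key)
SELECT src1.key, src2.value;  -- same plan as the all-permanent and all-temp variants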
string, c2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- Test temp tables with upper/lower case names +create temporary table Default.Temp_Table_Names (C1 string, c2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: Default@Temp_Table_Names +POSTHOOK: Output: database:default +PREHOOK: query: show tables 'Temp_Table*' +PREHOOK: type: SHOWTABLES +POSTHOOK: query: show tables 'Temp_Table*' +POSTHOOK: type: SHOWTABLES +temp_table_names +PREHOOK: query: show tables in default 'temp_table_names' +PREHOOK: type: SHOWTABLES +POSTHOOK: query: show tables in default 'temp_table_names' +POSTHOOK: type: SHOWTABLES +temp_table_names +PREHOOK: query: show tables in DEFAULT 'TEMP_TABLE_NAMES' +PREHOOK: type: SHOWTABLES +POSTHOOK: query: show tables in DEFAULT 'TEMP_TABLE_NAMES' +POSTHOOK: type: SHOWTABLES +temp_table_names +PREHOOK: query: select c1 from default.temp_table_names +PREHOOK: type: QUERY +PREHOOK: Input: default@temp_table_names +#### A masked pattern was here #### +POSTHOOK: query: select c1 from default.temp_table_names +POSTHOOK: type: QUERY +POSTHOOK: Input: default@temp_table_names +#### A masked pattern was here #### +PREHOOK: query: select C1 from DEFAULT.TEMP_TABLE_NAMES +PREHOOK: type: QUERY +PREHOOK: Input: default@temp_table_names +#### A masked pattern was here #### +POSTHOOK: query: select C1 from DEFAULT.TEMP_TABLE_NAMES +POSTHOOK: type: QUERY +POSTHOOK: Input: default@temp_table_names +#### A masked pattern was here #### +PREHOOK: query: drop table Default.TEMP_TABLE_names +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table Default.TEMP_TABLE_names +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: default@temp_table_names +PREHOOK: query: show tables 'temp_table_names' +PREHOOK: type: SHOWTABLES +POSTHOOK: query: show tables 'temp_table_names' +POSTHOOK: type: SHOWTABLES diff --git ql/src/test/results/clientpositive/temp_table_options1.q.out ql/src/test/results/clientpositive/temp_table_options1.q.out new file mode 100644 index 0000000..994681f --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_options1.q.out @@ -0,0 +1,552 @@ +PREHOOK: query: -- Delimiter test, taken from delimiter.q +create temporary table impressions (imp string, msg string) +row format delimited +fields terminated by '\t' +lines terminated by '\n' +stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- Delimiter test, taken from delimiter.q +create temporary table impressions (imp string, msg string) +row format delimited +fields terminated by '\t' +lines terminated by '\n' +stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@impressions +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in7.txt' INTO TABLE impressions +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@impressions +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in7.txt' INTO TABLE impressions +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@impressions +PREHOOK: query: select * from impressions +PREHOOK: type: QUERY +PREHOOK: Input: default@impressions +#### A masked pattern was here #### +POSTHOOK: query: select * from impressions +POSTHOOK: type: QUERY +POSTHOOK: Input: default@impressions +#### A masked pattern was here #### +35 40 +48 32 +100100 40 +PREHOOK: query: select imp,msg from impressions +PREHOOK: type: QUERY +PREHOOK: Input: default@impressions +#### A masked pattern was 
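temp_table_names.q.out pins down identifier handling: temp table and database names are case-insensitive and stored lower-cased, so every spelling below resolves to the same temp_table_names:

create temporary table Default.Temp_Table_Names (C1 string, c2 string);
show tables in DEFAULT 'TEMP_TABLE_NAMES';  -- prints temp_table_names
select C1 from DEFAULT.TEMP_TABLE_NAMES;
drop table Default.TEMP_TABLE_names;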
here #### +POSTHOOK: query: select imp,msg from impressions +POSTHOOK: type: QUERY +POSTHOOK: Input: default@impressions +#### A masked pattern was here #### +35 40 +48 32 +100100 40 +PREHOOK: query: drop table impressions +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@impressions +PREHOOK: Output: default@impressions +POSTHOOK: query: drop table impressions +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@impressions +POSTHOOK: Output: default@impressions +PREHOOK: query: -- Try different SerDe formats, taken from date_serde.q + +-- +-- RegexSerDe +-- +create temporary table date_serde_regex ( + ORIGIN_CITY_NAME string, + DEST_CITY_NAME string, + FL_DATE date, + ARR_DELAY float, + FL_NUM int +) +row format serde 'org.apache.hadoop.hive.serde2.RegexSerDe' +with serdeproperties ( + "input.regex" = "([^]*)([^]*)([^]*)([^]*)([0-9]*)" +) +stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- Try different SerDe formats, taken from date_serde.q + +-- +-- RegexSerDe +-- +create temporary table date_serde_regex ( + ORIGIN_CITY_NAME string, + DEST_CITY_NAME string, + FL_DATE date, + ARR_DELAY float, + FL_NUM int +) +row format serde 'org.apache.hadoop.hive.serde2.RegexSerDe' +with serdeproperties ( + "input.regex" = "([^]*)([^]*)([^]*)([^]*)([0-9]*)" +) +stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_serde_regex +PREHOOK: query: load data local inpath '../../data/files/flights_tiny.txt.1' overwrite into table date_serde_regex +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@date_serde_regex +POSTHOOK: query: load data local inpath '../../data/files/flights_tiny.txt.1' overwrite into table date_serde_regex +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@date_serde_regex +PREHOOK: query: select * from date_serde_regex +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_regex +#### A masked pattern was here #### +POSTHOOK: query: select * from date_serde_regex +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_regex +#### A masked pattern was here #### +Baltimore New York 2010-10-20 -30.0 1064 +Baltimore New York 2010-10-20 23.0 1142 +Baltimore New York 2010-10-20 6.0 1599 +Chicago New York 2010-10-20 42.0 361 +Chicago New York 2010-10-20 24.0 897 +Chicago New York 2010-10-20 15.0 1531 +Chicago New York 2010-10-20 -6.0 1610 +Chicago New York 2010-10-20 -2.0 3198 +Baltimore New York 2010-10-21 17.0 1064 +Baltimore New York 2010-10-21 105.0 1142 +Baltimore New York 2010-10-21 28.0 1599 +Chicago New York 2010-10-21 142.0 361 +Chicago New York 2010-10-21 77.0 897 +Chicago New York 2010-10-21 53.0 1531 +Chicago New York 2010-10-21 -5.0 1610 +Chicago New York 2010-10-21 51.0 3198 +Baltimore New York 2010-10-22 -12.0 1064 +Baltimore New York 2010-10-22 54.0 1142 +Baltimore New York 2010-10-22 18.0 1599 +Chicago New York 2010-10-22 2.0 361 +Chicago New York 2010-10-22 24.0 897 +Chicago New York 2010-10-22 16.0 1531 +Chicago New York 2010-10-22 -6.0 1610 +Chicago New York 2010-10-22 -11.0 3198 +Baltimore New York 2010-10-23 18.0 272 +Baltimore New York 2010-10-23 -10.0 1805 +Baltimore New York 2010-10-23 6.0 3171 +Chicago New York 2010-10-23 3.0 384 +Chicago New York 2010-10-23 32.0 426 +Chicago New York 2010-10-23 1.0 650 +Chicago New York 2010-10-23 11.0 3085 +Baltimore New York 2010-10-24 12.0 1599 +Baltimore New York 2010-10-24 20.0 2571 +Chicago New York 2010-10-24 10.0 361 +Chicago New York 
2010-10-24 113.0 897 +Chicago New York 2010-10-24 -5.0 1531 +Chicago New York 2010-10-24 -17.0 1610 +Chicago New York 2010-10-24 -3.0 3198 +Baltimore New York 2010-10-25 -25.0 1064 +Baltimore New York 2010-10-25 92.0 1142 +Baltimore New York 2010-10-25 106.0 1599 +Chicago New York 2010-10-25 31.0 361 +Chicago New York 2010-10-25 -1.0 897 +Chicago New York 2010-10-25 43.0 1531 +Chicago New York 2010-10-25 6.0 1610 +Chicago New York 2010-10-25 -16.0 3198 +Baltimore New York 2010-10-26 -22.0 1064 +Baltimore New York 2010-10-26 123.0 1142 +Baltimore New York 2010-10-26 90.0 1599 +Chicago New York 2010-10-26 12.0 361 +Chicago New York 2010-10-26 0.0 897 +Chicago New York 2010-10-26 29.0 1531 +Chicago New York 2010-10-26 -17.0 1610 +Chicago New York 2010-10-26 6.0 3198 +Baltimore New York 2010-10-27 -18.0 1064 +Baltimore New York 2010-10-27 49.0 1142 +Baltimore New York 2010-10-27 92.0 1599 +Chicago New York 2010-10-27 148.0 361 +Chicago New York 2010-10-27 -11.0 897 +Chicago New York 2010-10-27 70.0 1531 +Chicago New York 2010-10-27 8.0 1610 +Chicago New York 2010-10-27 21.0 3198 +Baltimore New York 2010-10-28 -4.0 1064 +Baltimore New York 2010-10-28 -14.0 1142 +Baltimore New York 2010-10-28 -14.0 1599 +Chicago New York 2010-10-28 2.0 361 +Chicago New York 2010-10-28 2.0 897 +Chicago New York 2010-10-28 -11.0 1531 +Chicago New York 2010-10-28 3.0 1610 +Chicago New York 2010-10-28 -18.0 3198 +Baltimore New York 2010-10-29 -24.0 1064 +Baltimore New York 2010-10-29 21.0 1142 +Baltimore New York 2010-10-29 -2.0 1599 +Chicago New York 2010-10-29 -12.0 361 +Chicago New York 2010-10-29 -11.0 897 +Chicago New York 2010-10-29 15.0 1531 +Chicago New York 2010-10-29 -18.0 1610 +Chicago New York 2010-10-29 -4.0 3198 +Baltimore New York 2010-10-30 14.0 272 +Baltimore New York 2010-10-30 -1.0 1805 +Baltimore New York 2010-10-30 5.0 3171 +Chicago New York 2010-10-30 -6.0 384 +Chicago New York 2010-10-30 -10.0 426 +Chicago New York 2010-10-30 -5.0 650 +Chicago New York 2010-10-30 -5.0 3085 +Baltimore New York 2010-10-31 -1.0 1599 +Baltimore New York 2010-10-31 -14.0 2571 +Chicago New York 2010-10-31 -25.0 361 +Chicago New York 2010-10-31 -18.0 897 +Chicago New York 2010-10-31 -4.0 1531 +Chicago New York 2010-10-31 -22.0 1610 +Chicago New York 2010-10-31 -15.0 3198 +Cleveland New York 2010-10-30 -23.0 2018 +Cleveland New York 2010-10-30 -12.0 2932 +Cleveland New York 2010-10-29 -4.0 2630 +Cleveland New York 2010-10-29 -19.0 2646 +Cleveland New York 2010-10-29 -12.0 3014 +Cleveland New York 2010-10-28 3.0 2630 +Cleveland New York 2010-10-28 -6.0 2646 +Cleveland New York 2010-10-28 1.0 3014 +Cleveland New York 2010-10-27 16.0 2630 +Cleveland New York 2010-10-27 27.0 3014 +Cleveland New York 2010-10-26 4.0 2630 +Cleveland New York 2010-10-26 -27.0 2646 +Cleveland New York 2010-10-26 -11.0 2662 +Cleveland New York 2010-10-26 13.0 3014 +Cleveland New York 2010-10-25 -4.0 2630 +Cleveland New York 2010-10-25 81.0 2646 +Cleveland New York 2010-10-25 42.0 3014 +Cleveland New York 2010-10-24 5.0 2254 +Cleveland New York 2010-10-24 -11.0 2630 +Cleveland New York 2010-10-24 -20.0 2646 +Cleveland New York 2010-10-24 -9.0 3014 +Cleveland New York 2010-10-23 -21.0 2932 +Cleveland New York 2010-10-22 1.0 2630 +Cleveland New York 2010-10-22 -25.0 2646 +Cleveland New York 2010-10-22 -3.0 3014 +Cleveland New York 2010-10-21 3.0 2630 +Cleveland New York 2010-10-21 29.0 2646 +Cleveland New York 2010-10-21 72.0 3014 +Cleveland New York 2010-10-20 -8.0 2630 +Cleveland New York 2010-10-20 -15.0 3014 +Washington New York 2010-10-23 
-25.0 5832 +Washington New York 2010-10-23 -21.0 5904 +Washington New York 2010-10-23 -18.0 5917 +Washington New York 2010-10-30 -27.0 5904 +Washington New York 2010-10-30 -16.0 5917 +Washington New York 2010-10-20 -2.0 7291 +Washington New York 2010-10-21 22.0 7291 +Washington New York 2010-10-23 -16.0 7274 +Washington New York 2010-10-24 -26.0 7282 +Washington New York 2010-10-25 9.0 7291 +Washington New York 2010-10-26 4.0 7291 +Washington New York 2010-10-27 26.0 7291 +Washington New York 2010-10-28 45.0 7291 +Washington New York 2010-10-29 1.0 7291 +Washington New York 2010-10-31 -18.0 7282 +PREHOOK: query: select fl_date, count(*) from date_serde_regex group by fl_date +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_regex +#### A masked pattern was here #### +POSTHOOK: query: select fl_date, count(*) from date_serde_regex group by fl_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_regex +#### A masked pattern was here #### +2010-10-20 11 +2010-10-21 12 +2010-10-22 11 +2010-10-23 12 +2010-10-24 12 +2010-10-25 12 +2010-10-26 13 +2010-10-27 11 +2010-10-28 12 +2010-10-29 12 +2010-10-30 11 +2010-10-31 8 +PREHOOK: query: -- +-- LazyBinary +-- +create temporary table date_serde_lb ( + c1 date, + c2 int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- +-- LazyBinary +-- +create temporary table date_serde_lb ( + c1 date, + c2 int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_serde_lb +PREHOOK: query: alter table date_serde_lb set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@date_serde_lb +PREHOOK: Output: default@date_serde_lb +POSTHOOK: query: alter table date_serde_lb set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@date_serde_lb +POSTHOOK: Output: default@date_serde_lb +PREHOOK: query: insert overwrite table date_serde_lb + select fl_date, fl_num from date_serde_regex limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_regex +PREHOOK: Output: default@date_serde_lb +POSTHOOK: query: insert overwrite table date_serde_lb + select fl_date, fl_num from date_serde_regex limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_regex +POSTHOOK: Output: default@date_serde_lb +POSTHOOK: Lineage: date_serde_lb.c1 SIMPLE [(date_serde_regex)date_serde_regex.FieldSchema(name:fl_date, type:date, comment:), ] +POSTHOOK: Lineage: date_serde_lb.c2 SIMPLE [(date_serde_regex)date_serde_regex.FieldSchema(name:fl_num, type:int, comment:), ] +PREHOOK: query: select * from date_serde_lb +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_lb +#### A masked pattern was here #### +POSTHOOK: query: select * from date_serde_lb +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_lb +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: select c1, sum(c2) from date_serde_lb group by c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_lb +#### A masked pattern was here #### +POSTHOOK: query: select c1, sum(c2) from date_serde_lb group by c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_lb +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: -- +-- LazySimple +-- +create temporary table date_serde_ls ( + c1 date, + c2 int +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- +-- LazySimple +-- +create temporary 
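The first storage case in temp_table_options1.q.out drives a temporary table through RegexSerDe: each capture group in input.regex populates one declared column, including non-string types such as the DATE used below, which must parse and group correctly:

select fl_date, count(*)
from date_serde_regex  -- declared via row format serde '...RegexSerDe'
group by fl_date;      -- e.g. 2010-10-20 -> 11, 2010-10-21 -> 12, ...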
table date_serde_ls ( + c1 date, + c2 int +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_serde_ls +PREHOOK: query: alter table date_serde_ls set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@date_serde_ls +PREHOOK: Output: default@date_serde_ls +POSTHOOK: query: alter table date_serde_ls set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@date_serde_ls +POSTHOOK: Output: default@date_serde_ls +PREHOOK: query: insert overwrite table date_serde_ls + select c1, c2 from date_serde_lb limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_lb +PREHOOK: Output: default@date_serde_ls +POSTHOOK: query: insert overwrite table date_serde_ls + select c1, c2 from date_serde_lb limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_lb +POSTHOOK: Output: default@date_serde_ls +POSTHOOK: Lineage: date_serde_ls.c1 SIMPLE [(date_serde_lb)date_serde_lb.FieldSchema(name:c1, type:date, comment:null), ] +POSTHOOK: Lineage: date_serde_ls.c2 SIMPLE [(date_serde_lb)date_serde_lb.FieldSchema(name:c2, type:int, comment:null), ] +PREHOOK: query: select * from date_serde_ls +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_ls +#### A masked pattern was here #### +POSTHOOK: query: select * from date_serde_ls +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_ls +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: select c1, sum(c2) from date_serde_ls group by c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_ls +#### A masked pattern was here #### +POSTHOOK: query: select c1, sum(c2) from date_serde_ls group by c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_ls +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: -- +-- Columnar +-- +create temporary table date_serde_c ( + c1 date, + c2 int +) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- +-- Columnar +-- +create temporary table date_serde_c ( + c1 date, + c2 int +) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_serde_c +PREHOOK: query: alter table date_serde_c set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@date_serde_c +PREHOOK: Output: default@date_serde_c +POSTHOOK: query: alter table date_serde_c set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@date_serde_c +POSTHOOK: Output: default@date_serde_c +PREHOOK: query: insert overwrite table date_serde_c + select c1, c2 from date_serde_ls limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_ls +PREHOOK: Output: default@date_serde_c +POSTHOOK: query: insert overwrite table date_serde_c + select c1, c2 from date_serde_ls limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_ls +POSTHOOK: Output: default@date_serde_c +POSTHOOK: Lineage: date_serde_c.c1 SIMPLE [(date_serde_ls)date_serde_ls.FieldSchema(name:c1, type:date, comment:null), ] +POSTHOOK: Lineage: date_serde_c.c2 SIMPLE [(date_serde_ls)date_serde_ls.FieldSchema(name:c2, type:int, comment:null), ] +PREHOOK: query: select * from date_serde_c +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_c +#### A masked pattern was here #### +POSTHOOK: query: 
select * from date_serde_c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_c +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: select c1, sum(c2) from date_serde_c group by c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_c +#### A masked pattern was here #### +POSTHOOK: query: select c1, sum(c2) from date_serde_c group by c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_c +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: -- +-- LazyBinaryColumnar +-- +create temporary table date_serde_lbc ( + c1 date, + c2 int +) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- +-- LazyBinaryColumnar +-- +create temporary table date_serde_lbc ( + c1 date, + c2 int +) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_serde_lbc +PREHOOK: query: alter table date_serde_lbc set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@date_serde_lbc +PREHOOK: Output: default@date_serde_lbc +POSTHOOK: query: alter table date_serde_lbc set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@date_serde_lbc +POSTHOOK: Output: default@date_serde_lbc +PREHOOK: query: insert overwrite table date_serde_lbc + select c1, c2 from date_serde_c limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_c +PREHOOK: Output: default@date_serde_lbc +POSTHOOK: query: insert overwrite table date_serde_lbc + select c1, c2 from date_serde_c limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_c +POSTHOOK: Output: default@date_serde_lbc +POSTHOOK: Lineage: date_serde_lbc.c1 SIMPLE [(date_serde_c)date_serde_c.FieldSchema(name:c1, type:date, comment:null), ] +POSTHOOK: Lineage: date_serde_lbc.c2 SIMPLE [(date_serde_c)date_serde_c.FieldSchema(name:c2, type:int, comment:null), ] +PREHOOK: query: select * from date_serde_lbc +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_lbc +#### A masked pattern was here #### +POSTHOOK: query: select * from date_serde_lbc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_lbc +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: select c1, sum(c2) from date_serde_lbc group by c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_lbc +#### A masked pattern was here #### +POSTHOOK: query: select c1, sum(c2) from date_serde_lbc group by c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_lbc +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: -- +-- ORC +-- +create temporary table date_serde_orc ( + c1 date, + c2 int +) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- +-- ORC +-- +create temporary table date_serde_orc ( + c1 date, + c2 int +) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@date_serde_orc +PREHOOK: query: alter table date_serde_orc set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +PREHOOK: type: ALTERTABLE_SERIALIZER +PREHOOK: Input: default@date_serde_orc +PREHOOK: Output: default@date_serde_orc +POSTHOOK: query: alter table date_serde_orc set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' +POSTHOOK: type: ALTERTABLE_SERIALIZER +POSTHOOK: Input: default@date_serde_orc +POSTHOOK: Output: default@date_serde_orc +PREHOOK: query: 
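Each remaining SerDe (LazyBinary, LazySimple, ColumnarSerDe, LazyBinaryColumnarSerDe, OrcSerde) is checked with the same round-trip pattern: create a temp table, switch its SerDe with ALTER TABLE, insert one row from the previous table, and read it back both raw and aggregated:

create temporary table date_serde_lb (c1 date, c2 int);
alter table date_serde_lb
  set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe';
insert overwrite table date_serde_lb
  select fl_date, fl_num from date_serde_regex limit 1;
select * from date_serde_lb;                        -- 2010-10-20  1064
select c1, sum(c2) from date_serde_lb group by c1;  -- 2010-10-20  1064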
insert overwrite table date_serde_orc + select c1, c2 from date_serde_lbc limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_lbc +PREHOOK: Output: default@date_serde_orc +POSTHOOK: query: insert overwrite table date_serde_orc + select c1, c2 from date_serde_lbc limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_lbc +POSTHOOK: Output: default@date_serde_orc +POSTHOOK: Lineage: date_serde_orc.c1 SIMPLE [(date_serde_lbc)date_serde_lbc.FieldSchema(name:c1, type:date, comment:null), ] +POSTHOOK: Lineage: date_serde_orc.c2 SIMPLE [(date_serde_lbc)date_serde_lbc.FieldSchema(name:c2, type:int, comment:null), ] +PREHOOK: query: select * from date_serde_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from date_serde_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_orc +#### A masked pattern was here #### +2010-10-20 1064 +PREHOOK: query: select c1, sum(c2) from date_serde_orc group by c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_serde_orc +#### A masked pattern was here #### +POSTHOOK: query: select c1, sum(c2) from date_serde_orc group by c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_serde_orc +#### A masked pattern was here #### +2010-10-20 1064 diff --git ql/src/test/results/clientpositive/temp_table_precedence.q.out ql/src/test/results/clientpositive/temp_table_precedence.q.out new file mode 100644 index 0000000..1075b2c --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_precedence.q.out @@ -0,0 +1,211 @@ +PREHOOK: query: create database ttp +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: create database ttp +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: -- Create non-temp tables +create table ttp.tab1 (a1 string, a2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:ttp +POSTHOOK: query: -- Create non-temp tables +create table ttp.tab1 (a1 string, a2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:ttp +POSTHOOK: Output: ttp@tab1 +PREHOOK: query: insert overwrite table ttp.tab1 select * from src where key = 5 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: ttp@tab1 +POSTHOOK: query: insert overwrite table ttp.tab1 select * from src where key = 5 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: ttp@tab1 +POSTHOOK: Lineage: tab1.a1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tab1.a2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe ttp.tab1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: ttp@tab1 +POSTHOOK: query: describe ttp.tab1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: ttp@tab1 +a1 string +a2 string +PREHOOK: query: select * from ttp.tab1 +PREHOOK: type: QUERY +PREHOOK: Input: ttp@tab1 +#### A masked pattern was here #### +POSTHOOK: query: select * from ttp.tab1 +POSTHOOK: type: QUERY +POSTHOOK: Input: ttp@tab1 +#### A masked pattern was here #### +5 val_5 +5 val_5 +5 val_5 +PREHOOK: query: create table ttp.tab2 (b1 string, b2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:ttp +POSTHOOK: query: create table ttp.tab2 (b1 string, b2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:ttp +POSTHOOK: Output: ttp@tab2 +PREHOOK: query: insert overwrite table ttp.tab2 select * from src where key = 2 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: ttp@tab2 +POSTHOOK: query: insert overwrite table 
ttp.tab2 select * from src where key = 2 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: ttp@tab2 +POSTHOOK: Lineage: tab2.b1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tab2.b2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe ttp.tab2 +PREHOOK: type: DESCTABLE +PREHOOK: Input: ttp@tab2 +POSTHOOK: query: describe ttp.tab2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: ttp@tab2 +b1 string +b2 string +PREHOOK: query: select * from ttp.tab2 +PREHOOK: type: QUERY +PREHOOK: Input: ttp@tab2 +#### A masked pattern was here #### +POSTHOOK: query: select * from ttp.tab2 +POSTHOOK: type: QUERY +POSTHOOK: Input: ttp@tab2 +#### A masked pattern was here #### +2 val_2 +PREHOOK: query: -- Now create temp table with same name +create temporary table ttp.tab1 (c1 int, c2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:ttp +POSTHOOK: query: -- Now create temp table with same name +create temporary table ttp.tab1 (c1 int, c2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:ttp +POSTHOOK: Output: ttp@tab1 +PREHOOK: query: insert overwrite table ttp.tab1 select * from src where key = 0 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: ttp@tab1 +POSTHOOK: query: insert overwrite table ttp.tab1 select * from src where key = 0 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: ttp@tab1 +POSTHOOK: Lineage: tab1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tab1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- describe/select should now use temp table +describe ttp.tab1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: ttp@tab1 +POSTHOOK: query: -- describe/select should now use temp table +describe ttp.tab1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: ttp@tab1 +c1 int +c2 string +PREHOOK: query: select * from ttp.tab1 +PREHOOK: type: QUERY +PREHOOK: Input: ttp@tab1 +#### A masked pattern was here #### +POSTHOOK: query: select * from ttp.tab1 +POSTHOOK: type: QUERY +POSTHOOK: Input: ttp@tab1 +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +PREHOOK: query: -- rename the temp table, and now we can see our non-temp table again +use ttp +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: -- rename the temp table, and now we can see our non-temp table again +use ttp +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: alter table tab1 rename to tab2 +PREHOOK: type: ALTERTABLE_RENAME +PREHOOK: Input: ttp@tab1 +PREHOOK: Output: ttp@tab1 +POSTHOOK: query: alter table tab1 rename to tab2 +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: ttp@tab1 +POSTHOOK: Output: ttp@tab1 +POSTHOOK: Output: ttp@tab2 +PREHOOK: query: use default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: describe ttp.tab1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: ttp@tab1 +POSTHOOK: query: describe ttp.tab1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: ttp@tab1 +a1 string +a2 string +PREHOOK: query: select * from ttp.tab1 +PREHOOK: type: QUERY +PREHOOK: Input: ttp@tab1 +#### A masked pattern was here #### +POSTHOOK: query: select * from ttp.tab1 +POSTHOOK: type: QUERY +POSTHOOK: Input: ttp@tab1 +#### A masked pattern was here #### +5 val_5 +5 val_5 +5 val_5 +PREHOOK: query: -- now the non-temp tab2 should be hidden +describe ttp.tab2 +PREHOOK: type: DESCTABLE +PREHOOK: 
Input: ttp@tab2 +POSTHOOK: query: -- now the non-temp tab2 should be hidden +describe ttp.tab2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: ttp@tab2 +c1 int +c2 string +PREHOOK: query: select * from ttp.tab2 +PREHOOK: type: QUERY +PREHOOK: Input: ttp@tab2 +#### A masked pattern was here #### +POSTHOOK: query: select * from ttp.tab2 +POSTHOOK: type: QUERY +POSTHOOK: Input: ttp@tab2 +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +PREHOOK: query: -- drop the temp table, and now we should be able to see the non-temp tab2 again +drop table ttp.tab2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- drop the temp table, and now we should be able to see the non-temp tab2 again +drop table ttp.tab2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: ttp@tab2 +PREHOOK: query: describe ttp.tab2 +PREHOOK: type: DESCTABLE +PREHOOK: Input: ttp@tab2 +POSTHOOK: query: describe ttp.tab2 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: ttp@tab2 +b1 string +b2 string +PREHOOK: query: select * from ttp.tab2 +PREHOOK: type: QUERY +PREHOOK: Input: ttp@tab2 +#### A masked pattern was here #### +POSTHOOK: query: select * from ttp.tab2 +POSTHOOK: type: QUERY +POSTHOOK: Input: ttp@tab2 +#### A masked pattern was here #### +2 val_2 +PREHOOK: query: drop database ttp cascade +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:ttp +PREHOOK: Output: database:ttp +PREHOOK: Output: ttp@tab1 +PREHOOK: Output: ttp@tab2 +POSTHOOK: query: drop database ttp cascade +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:ttp +POSTHOOK: Output: database:ttp +POSTHOOK: Output: ttp@tab1 +POSTHOOK: Output: ttp@tab2 diff --git ql/src/test/results/clientpositive/temp_table_subquery1.q.out ql/src/test/results/clientpositive/temp_table_subquery1.q.out new file mode 100644 index 0000000..3301890 --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_subquery1.q.out @@ -0,0 +1,99 @@ +PREHOOK: query: create temporary table src_temp as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create temporary table src_temp as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_temp +PREHOOK: query: -- subquery exists +select * +from src_temp b +where exists + (select a.key + from src_temp a + where b.value = a.value and a.key = b.key and a.value > 'val_9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_temp +#### A masked pattern was here #### +POSTHOOK: query: -- subquery exists +select * +from src_temp b +where exists + (select a.key + from src_temp a + where b.value = a.value and a.key = b.key and a.value > 'val_9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_temp +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- subquery in +select * +from src_temp +where src_temp.key in (select key from src_temp s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src_temp +#### A masked pattern was here #### +POSTHOOK: query: -- subquery in +select * +from src_temp +where src_temp.key in (select key from src_temp s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_temp +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: select b.key, min(b.value) +from src_temp b +group by b.key +having b.key in ( select a.key + from 
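temp_table_precedence.q.out spells out the shadowing contract: a temp table with the same qualified name hides the permanent one, ALTER TABLE ... RENAME moves the shadow to the new name, and dropping the shadow re-exposes the permanent table:

create table ttp.tab1 (a1 string, a2 string);         -- permanent
create temporary table ttp.tab1 (c1 int, c2 string);  -- now shadows the permanent tab1
describe ttp.tab1;                -- c1 int, c2 string (the temp table wins)
alter table tab1 rename to tab2;  -- shadow moves: permanent tab1 visible, tab2 now hidden
drop table ttp.tab2;              -- drops the temp shadow; the permanent tab2 is back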
src_temp a + where a.value > 'val_9' and a.value = min(b.value) + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_temp +#### A masked pattern was here #### +POSTHOOK: query: select b.key, min(b.value) +from src_temp b +group by b.key +having b.key in ( select a.key + from src_temp a + where a.value > 'val_9' and a.value = min(b.value) + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_temp +#### A masked pattern was here #### +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: drop table src_temp +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_temp +PREHOOK: Output: default@src_temp +POSTHOOK: query: drop table src_temp +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_temp +POSTHOOK: Output: default@src_temp diff --git ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out new file mode 100644 index 0000000..b6996b4 --- /dev/null +++ ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out @@ -0,0 +1,749 @@ +PREHOOK: query: DROP TABLE part +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE part +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- data setup +CREATE TEMPORARY TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- data setup +CREATE TEMPORARY TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@part +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@part +PREHOOK: query: drop table over10k +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table over10k +POSTHOOK: type: DROPTABLE +PREHOOK: query: create temporary table over10k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) + row format delimited + fields terminated by '|' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create temporary table over10k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + bo boolean, + s string, + ts timestamp, + dec decimal(4,2), + bin binary) + row format delimited + fields terminated by '|' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@over10k +PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@over10k +POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@over10k +PREHOOK: query: select p_mfgr, p_retailprice, p_size, +round(sum(p_retailprice) over w1 , 2) = round(sum(lag(p_retailprice,1,0.0)) over w1 + last_value(p_retailprice) over w1 , 2), +max(p_retailprice) 
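temp_table_subquery1.q.out runs the standard correlated-subquery suite with a temp table on both sides of IN and EXISTS, including the correlated HAVING case shown above; the simplest form being checked is:

select * from src_temp
where src_temp.key in (select key from src_temp s1 where s1.key > '9');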
over w1 - min(p_retailprice) over w1 = last_value(p_retailprice) over w1 - first_value(p_retailprice) over w1 +from part +window w1 as (distribute by p_mfgr sort by p_retailprice) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, p_retailprice, p_size, +round(sum(p_retailprice) over w1 , 2) = round(sum(lag(p_retailprice,1,0.0)) over w1 + last_value(p_retailprice) over w1 , 2), +max(p_retailprice) over w1 - min(p_retailprice) over w1 = last_value(p_retailprice) over w1 - first_value(p_retailprice) over w1 +from part +window w1 as (distribute by p_mfgr sort by p_retailprice) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 2 true true +Manufacturer#1 1173.15 2 true true +Manufacturer#1 1414.42 28 true true +Manufacturer#1 1602.59 6 true true +Manufacturer#1 1632.66 42 true true +Manufacturer#1 1753.76 34 true true +Manufacturer#2 1690.68 14 true true +Manufacturer#2 1698.66 25 true true +Manufacturer#2 1701.6 18 true true +Manufacturer#2 1800.7 40 true true +Manufacturer#2 2031.98 2 true true +Manufacturer#3 1190.27 14 true true +Manufacturer#3 1337.29 45 true true +Manufacturer#3 1410.39 19 true true +Manufacturer#3 1671.68 17 true true +Manufacturer#3 1922.98 1 true true +Manufacturer#4 1206.26 27 true true +Manufacturer#4 1290.35 12 true true +Manufacturer#4 1375.42 39 true true +Manufacturer#4 1620.67 10 true true +Manufacturer#4 1844.92 7 true true +Manufacturer#5 1018.1 46 true true +Manufacturer#5 1464.48 23 true true +Manufacturer#5 1611.66 6 true true +Manufacturer#5 1788.73 2 true true +Manufacturer#5 1789.69 31 true true +PREHOOK: query: select p_mfgr, p_retailprice, p_size, +rank() over (distribute by p_mfgr sort by p_retailprice) as r, +sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between unbounded preceding and current row) as s2, +sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between unbounded preceding and current row) -5 as s1 +from part +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, p_retailprice, p_size, +rank() over (distribute by p_mfgr sort by p_retailprice) as r, +sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between unbounded preceding and current row) as s2, +sum(p_retailprice) over (distribute by p_mfgr sort by p_retailprice rows between unbounded preceding and current row) -5 as s1 +from part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 2 1 1173.15 1168.15 +Manufacturer#1 1173.15 2 1 2346.3 2341.3 +Manufacturer#1 1414.42 28 3 3760.7200000000003 3755.7200000000003 +Manufacturer#1 1602.59 6 4 5363.31 5358.31 +Manufacturer#1 1632.66 42 5 6995.97 6990.97 +Manufacturer#1 1753.76 34 6 8749.73 8744.73 +Manufacturer#2 1690.68 14 1 1690.68 1685.68 +Manufacturer#2 1698.66 25 2 3389.34 3384.34 +Manufacturer#2 1701.6 18 3 5090.9400000000005 5085.9400000000005 +Manufacturer#2 1800.7 40 4 6891.64 6886.64 +Manufacturer#2 2031.98 2 5 8923.62 8918.62 +Manufacturer#3 1190.27 14 1 1190.27 1185.27 +Manufacturer#3 1337.29 45 2 2527.56 2522.56 +Manufacturer#3 1410.39 19 3 3937.95 3932.95 +Manufacturer#3 1671.68 17 4 5609.63 5604.63 +Manufacturer#3 1922.98 1 5 7532.610000000001 7527.610000000001 +Manufacturer#4 1206.26 27 1 1206.26 1201.26 +Manufacturer#4 1290.35 12 2 2496.6099999999997 2491.6099999999997 +Manufacturer#4 1375.42 
39 3 3872.0299999999997 3867.0299999999997 +Manufacturer#4 1620.67 10 4 5492.7 5487.7 +Manufacturer#4 1844.92 7 5 7337.62 7332.62 +Manufacturer#5 1018.1 46 1 1018.1 1013.1 +Manufacturer#5 1464.48 23 2 2482.58 2477.58 +Manufacturer#5 1611.66 6 3 4094.24 4089.24 +Manufacturer#5 1788.73 2 4 5882.969999999999 5877.969999999999 +Manufacturer#5 1789.69 31 5 7672.66 7667.66 +PREHOOK: query: select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +#### A masked pattern was here #### +POSTHOOK: query: select s, si, f, si - lead(f, 3) over (partition by t order by bo,s,si,f desc) from over10k limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +#### A masked pattern was here #### +alice allen 400 76.31 337.23 +alice davidson 384 71.97 357.79 +alice king 455 2.48 395.93 +alice king 458 62.77 384.16998 +alice xylophone 485 26.21 464.05 +bob falkner 260 59.07 242.4 +bob ichabod 454 73.83 381.7 +bob polk 264 20.95 257.17 +bob underhill 454 17.6 424.94 +bob underhill 465 72.3 453.17 +bob van buren 433 6.83 398.4 +calvin ichabod 431 29.06 334.22 +david garcia 485 11.83 421.51 +ethan steinbeck 298 34.6 288.14 +fred ellison 376 96.78 330.76 +holly steinbeck 384 63.49 293.7 +holly underhill 318 9.86 269.91 +irene ellison 458 45.24 365.29 +irene underhill 307 90.3 244.19 +jessica johnson 494 48.09 490.18 +jessica king 459 92.71 452.2 +jessica white 284 62.81 209.08 +luke garcia 311 3.82 267.27 +luke young 451 6.8 429.0 +mike king 275 74.92 211.81 +oscar garcia 362 43.73 340.66 +priscilla laertes 316 22.0 296.06 +priscilla quirinius 423 63.19 362.72 +priscilla zipper 485 21.34 400.61 +quinn ellison 266 19.94 209.95 +quinn polk 507 60.28 447.66 +sarah robinson 320 84.39 309.74 +tom polk 346 56.05 320.33 +ulysses ellison 381 59.34 358.66 +ulysses quirinius 303 10.26 259.6 +ulysses robinson 313 25.67 269.31 +ulysses steinbeck 333 22.34 270.61 +victor allen 337 43.4 311.5 +victor hernandez 447 43.69 375.22 +victor xylophone 438 62.39 424.33 +wendy quirinius 279 25.5 250.25 +wendy robinson 275 71.78 262.88 +wendy xylophone 314 13.67 295.73 +xavier garcia 493 28.75 474.56 +zach thompson 386 12.12 377.63 +zach young 286 18.27 263.65 +alice falkner 280 18.44 227.7 +bob ellison 339 8.37 300.95 +bob johnson 374 22.35 326.49 +calvin white 280 52.3 198.32 +david carson 270 38.05 255.77 +david falkner 469 47.51 388.35 +david hernandez 408 81.68 339.27 +ethan underhill 339 14.23 256.26 +gabriella brown 498 80.65 413.25 +holly nixon 505 68.73 440.71 +holly polk 268 82.74 182.04001 +holly thompson 387 84.75 298.22 +irene young 458 64.29 401.8 +jessica miller 299 85.96 243.41 +katie ichabod 469 88.78 385.61 +luke ichabod 289 56.2 286.74 +luke king 337 55.59 274.88 +mike allen 465 83.39 383.03 +mike polk 500 2.26 427.74 +mike white 454 62.12 430.78 +mike xylophone 448 81.97 447.17 +nick nixon 335 72.26 240.78 +nick robinson 350 23.22 294.59 +oscar davidson 432 0.83 420.93 +oscar johnson 315 94.22 233.05 +oscar johnson 469 55.41 468.44 +oscar miller 324 11.07 265.19 +rachel davidson 507 81.95 468.78 +rachel thompson 344 0.56 246.12 +sarah miller 386 58.81 304.36 +sarah xylophone 275 38.22 177.48999 +sarah zipper 376 97.88 294.61 +tom hernandez 467 81.64 459.9 +tom hernandez 477 97.51 415.19 +tom steinbeck 414 81.39 361.87 +ulysses carson 343 7.1 314.22 +victor robinson 415 61.81 349.5 +victor thompson 344 52.13 NULL +xavier ovid 280 28.78 NULL +yuri xylophone 430 65.5 NULL +alice underhill 389 26.68 368.06 +alice 
underhill 446 6.49 444.21 +bob ovid 331 67.12 236.43 +bob van buren 406 20.94 383.32 +david falkner 406 1.79 374.34 +david miller 450 94.57 380.13 +ethan allen 380 22.68 375.6 +ethan king 395 31.66 361.51 +ethan nixon 475 69.87 431.39 +ethan polk 283 4.4 243.82 +fred allen 331 33.49 281.68 +fred king 511 43.61 457.22 +fred polk 261 39.18 248.73 +fred young 303 49.32 221.51001 +PREHOOK: query: select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +#### A masked pattern was here #### +POSTHOOK: query: select s, i, i - lead(i, 3, 0) over (partition by si order by i,s) from over10k limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +#### A masked pattern was here #### +wendy garcia 65540 -18 +ethan thompson 65543 -20 +zach nixon 65549 -31 +alice robinson 65558 -28 +wendy nixon 65563 -33 +victor robinson 65580 -19 +ethan falkner 65586 -18 +victor davidson 65596 -17 +xavier quirinius 65599 -14 +fred quirinius 65604 -11 +nick zipper 65613 -3 +xavier van buren 65613 -7 +victor johnson 65615 -12 +alice ovid 65616 -24 +xavier ovid 65620 -23 +ulysses white 65627 -24 +sarah white 65640 -13 +calvin young 65643 -25 +victor thompson 65651 -42 +calvin johnson 65653 -53 +irene polk 65668 -45 +zach underhill 65693 -38 +quinn hernandez 65706 -27 +rachel ovid 65713 -24 +gabriella falkner 65731 -7 +zach white 65733 -8 +fred hernandez 65737 -7 +rachel ellison 65738 -6 +oscar steinbeck 65741 -6 +alice ellison 65744 -8 +tom allen 65744 -19 +quinn quirinius 65747 -31 +victor hernandez 65752 -26 +holly xylophone 65763 -26 +david davidson 65778 65778 +ulysses young 65778 65778 +sarah brown 65789 65789 +xavier brown 65541 -16 +zach hernandez 65542 -18 +katie ichabod 65547 -19 +oscar young 65557 -15 +holly white 65560 -14 +priscilla laertes 65566 -9 +ethan king 65572 -6 +zach hernandez 65574 -10 +oscar thompson 65575 -13 +victor xylophone 65578 -16 +gabriella ellison 65584 -26 +nick quirinius 65588 -22 +holly robinson 65594 -18 +alice xylophone 65610 -16 +yuri brown 65610 -21 +sarah hernandez 65612 -26 +katie garcia 65626 -28 +jessica laertes 65631 -23 +ethan underhill 65638 -17 +irene young 65654 -37 +priscilla thompson 65654 -40 +luke quirinius 65655 -44 +david brown 65691 -20 +luke falkner 65694 -18 +priscilla miller 65699 -20 +rachel robinson 65711 -9 +ethan polk 65712 -10 +wendy brown 65719 -13 +mike underhill 65720 -18 +zach underhill 65722 -26 +nick zipper 65732 -20 +fred brown 65738 -18 +ulysses young 65748 -23 +nick davidson 65752 -19 +fred zipper 65756 -15 +yuri nixon 65771 -10 +zach hernandez 65771 -19 +zach zipper 65771 65771 +alice underhill 65781 65781 +oscar laertes 65790 65790 +sarah zipper 65546 -19 +bob falkner 65551 -17 +luke ovid 65551 -17 +katie allen 65565 -4 +nick falkner 65568 -5 +zach steinbeck 65568 -11 +oscar van buren 65569 -13 +gabriella young 65573 -11 +jessica ichabod 65579 -24 +david garcia 65582 -24 +nick xylophone 65584 -27 +calvin johnson 65603 -14 +xavier zipper 65606 -50 +alice nixon 65611 -58 +jessica laertes 65617 -62 +fred king 65656 -61 +priscilla underhill 65669 -48 +priscilla zipper 65679 -45 +nick king 65717 -11 +sarah polk 65717 -17 +irene quirinius 65724 -28 +tom laertes 65728 -25 +yuri johnson 65734 -27 +PREHOOK: query: select s, si, d, si - lag(d, 3) over (partition by b order by si,s,d) from over10k limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +#### A masked pattern was here #### +POSTHOOK: query: select s, si, d, si - lag(d, 3) over (partition 
by b order by si,s,d) from over10k limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +#### A masked pattern was here #### +jessica ellison 262 30.41 NULL +david young 266 45.12 NULL +jessica steinbeck 274 2.15 NULL +david zipper 275 43.45 244.59 +zach nixon 283 15.95 237.88 +holly allen 285 24.37 282.85 +irene garcia 292 33.54 248.55 +ulysses xylophone 292 44.66 276.05 +irene van buren 309 35.81 284.63 +sarah miller 312 6.65 278.46 +victor garcia 312 39.14 267.34000000000003 +ethan ichabod 319 29.4 283.19 +wendy falkner 322 10.02 315.35 +oscar miller 324 25.95 284.86 +david ovid 332 28.34 302.6 +alice zipper 333 3.38 322.98 +yuri nixon 333 8.28 307.05 +ulysses nixon 335 18.48 306.66 +david ovid 336 9.36 332.62 +calvin falkner 337 17.63 328.72 +katie quirinius 349 11.3 330.52 +quinn miller 351 22.46 341.64 +victor xylophone 357 38.58 339.37 +ethan garcia 368 9.2 356.7 +nick steinbeck 395 37.54 372.54 +ulysses ichabod 415 47.61 376.42 +rachel thompson 416 37.99 406.8 +calvin young 418 47.22 380.46 +katie xylophone 425 32.59 377.39 +nick quirinius 429 19.63 391.01 +ethan ellison 453 47.92 405.78 +irene nixon 454 48.03 421.40999999999997 +bob steinbeck 462 47.04 442.37 +luke robinson 462 47.48 414.08 +gabriella steinbeck 467 9.35 418.97 +tom hernandez 467 29.36 419.96 +irene polk 485 14.26 437.52 +mike xylophone 494 36.92 484.65 +calvin allen 499 39.99 469.64 +quinn steinbeck 503 16.62 488.74 +calvin thompson 263 30.87 NULL +rachel quirinius 263 29.46 NULL +ulysses garcia 263 31.85 NULL +mike steinbeck 266 48.57 235.13 +rachel young 275 14.75 245.54 +tom king 278 31.11 246.15 +oscar robinson 283 30.35 234.43 +zach allen 284 1.88 269.25 +bob king 308 27.61 276.89 +ulysses allen 310 22.77 279.65 +fred nixon 317 0.48 315.12 +gabriella robinson 321 0.33 293.39 +bob johnson 325 9.61 302.23 +rachel davidson 335 2.34 334.52 +fred brown 337 5.8 336.67 +wendy ellison 350 20.25 340.39 +zach falkner 391 13.67 388.66 +katie xylophone 410 39.09 404.2 +holly king 413 3.56 392.75 +sarah van buren 417 7.81 403.33 +calvin van buren 430 36.01 390.90999999999997 +katie white 434 33.56 430.44 +oscar quirinius 454 7.03 446.19 +zach young 505 18.19 468.99 +gabriella robinson 506 12.8 472.44 +sarah xylophone 507 16.09 499.97 +rachel thompson 267 46.87 NULL +gabriella van buren 271 41.04 NULL +mike steinbeck 284 11.44 NULL +ethan ovid 293 2.08 246.13 +luke falkner 293 40.67 251.96 +irene nixon 321 24.35 309.56 +mike van buren 327 2.58 324.92 +ulysses robinson 329 26.64 288.33 +quinn laertes 332 10.71 307.65 +tom polk 346 34.03 343.42 +jessica johnson 352 45.71 325.36 +xavier davidson 354 33.9 343.29 +wendy nixon 364 29.42 329.97 +jessica quirinius 375 47.33 329.29 +xavier brown 376 26.17 342.1 +gabriella davidson 383 18.87 353.58 +jessica brown 388 34.09 340.67 +gabriella garcia 391 32.44 364.83 +ethan miller 396 49.07 377.13 +bob garcia 416 7.82 381.90999999999997 +priscilla hernandez 416 29.94 383.56 +holly nixon 419 17.81 369.93 +nick underhill 429 39.54 421.18 +xavier falkner 434 0.88 404.06 +luke robinson 461 44.02 443.19 +bob underhill 465 22.58 425.46 +ulysses king 483 37.98 482.12 +jessica miller 486 26.14 441.98 +bob ovid 493 9.7 470.42 +alice falkner 500 37.85 462.02 +quinn xylophone 267 49.8 NULL +gabriella thompson 268 17.15 NULL +calvin xylophone 275 49.32 NULL +gabriella zipper 279 30.41 229.2 +PREHOOK: query: select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +#### A masked pattern was here #### 
+POSTHOOK: query: select s, lag(s, 3, 'fred') over (partition by f order by b) from over10k limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +#### A masked pattern was here #### +yuri thompson fred +bob ichabod fred +luke king fred +luke steinbeck fred +fred zipper fred +quinn miller fred +calvin van buren fred +holly steinbeck fred +david davidson fred +calvin thompson fred +calvin quirinius fred +david ovid fred +holly thompson fred +nick zipper fred +victor steinbeck fred +victor robinson fred +zach ovid fred +ulysses zipper fred +luke falkner fred +irene thompson fred +yuri johnson fred +ulysses falkner fred +gabriella robinson fred +alice robinson fred +priscilla xylophone fred +david laertes fred +mike underhill fred +victor van buren fred +holly falkner fred +priscilla falkner fred +ethan ovid fred +luke zipper fred +mike steinbeck fred +calvin white fred +alice quirinius fred +irene miller fred +wendy polk fred +nick young fred +yuri davidson fred +ethan ellison fred +zach hernandez fred +wendy miller fred +katie underhill fred +irene zipper fred +holly allen fred +quinn brown fred +calvin ovid fred +zach robinson fred +nick miller fred +mike allen fred +yuri van buren fred +priscilla young fred +zach miller fred +victor xylophone fred +sarah falkner fred +rachel ichabod fred +alice robinson fred +calvin ovid fred +calvin ovid fred +luke laertes fred +david hernandez fred +alice ovid fred +luke quirinius fred +oscar white fred +zach falkner fred +rachel thompson fred +priscilla king fred +xavier polk fred +wendy ichabod fred +rachel ovid fred +wendy allen fred +luke brown fred +mike brown fred +oscar ichabod fred +xavier garcia fred +yuri brown fred +bob xylophone fred +luke davidson fred +ethan quirinius fred +zach davidson fred +irene miller fred +wendy king fred +bob zipper fred +sarah thompson fred +bob carson fred +bob laertes fred +xavier allen fred +sarah robinson fred +david king fred +oscar davidson fred +victor hernandez fred +wendy polk fred +david ellison fred +ulysses johnson fred +jessica ovid fred +bob king fred +ulysses garcia fred +irene falkner fred +holly robinson fred +yuri white fred +PREHOOK: query: select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mfgr) from part +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, avg(p_retailprice) over(partition by p_mfgr, p_type order by p_mfgr) from part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1753.76 +Manufacturer#1 1632.66 +Manufacturer#1 1602.59 +Manufacturer#1 1173.15 +Manufacturer#1 1173.15 +Manufacturer#1 1414.42 +Manufacturer#2 1800.7 +Manufacturer#2 1690.68 +Manufacturer#2 2031.98 +Manufacturer#2 1698.66 +Manufacturer#2 1701.6 +Manufacturer#3 1922.98 +Manufacturer#3 1410.39 +Manufacturer#3 1671.68 +Manufacturer#3 1190.27 +Manufacturer#3 1337.29 +Manufacturer#4 1844.92 +Manufacturer#4 1375.42 +Manufacturer#4 1620.67 +Manufacturer#4 1206.26 +Manufacturer#4 1290.35 +Manufacturer#5 1018.1 +Manufacturer#5 1464.48 +Manufacturer#5 1789.69 +Manufacturer#5 1788.73 +Manufacturer#5 1611.66 +PREHOOK: query: select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between unbounded preceding and current row) from part +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, avg(p_retailprice) over(partition by p_mfgr order by p_type,p_mfgr rows between 
unbounded preceding and current row) from part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1753.76 +Manufacturer#1 1693.21 +Manufacturer#1 1663.0033333333333 +Manufacturer#1 1540.54 +Manufacturer#1 1467.062 +Manufacturer#1 1458.2883333333332 +Manufacturer#2 1800.7 +Manufacturer#2 1745.69 +Manufacturer#2 1841.1200000000001 +Manufacturer#2 1805.505 +Manufacturer#2 1784.7240000000002 +Manufacturer#3 1922.98 +Manufacturer#3 1666.685 +Manufacturer#3 1668.3500000000001 +Manufacturer#3 1548.83 +Manufacturer#3 1506.522 +Manufacturer#4 1844.92 +Manufacturer#4 1610.17 +Manufacturer#4 1613.67 +Manufacturer#4 1511.8175 +Manufacturer#4 1467.5240000000001 +Manufacturer#5 1018.1 +Manufacturer#5 1241.29 +Manufacturer#5 1424.0900000000001 +Manufacturer#5 1515.25 +Manufacturer#5 1534.532 +PREHOOK: query: -- multi table insert test +create table t1 (a1 int, b1 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: -- multi table insert test +create table t1 (a1 int, b1 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: create table t2 (a1 int, b1 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: create table t2 (a1 int, b1 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +PREHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * +PREHOOK: type: QUERY +PREHOOK: Input: default@over10k +PREHOOK: Output: default@t1 +PREHOOK: Output: default@t2 +POSTHOOK: query: from (select sum(i) over (partition by ts order by i), s from over10k) tt insert overwrite table t1 select * insert overwrite table t2 select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@over10k +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t2 +POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: t1.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), 
(over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: t2.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +PREHOOK: query: select * from t1 limit 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 limit 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +65542 rachel thompson +131088 oscar brown +262258 wendy steinbeck +PREHOOK: query: select * from t2 limit 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: select * from t2 limit 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +65542 rachel thompson +131088 oscar brown +262258 wendy steinbeck +PREHOOK: query: select p_mfgr, p_retailprice, p_size, +round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2) +from part +window w1 as (distribute by p_mfgr sort by p_retailprice) +limit 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: select p_mfgr, p_retailprice, p_size, +round(sum(p_retailprice) over w1 , 2) + 50.0 = round(sum(lag(p_retailprice,1,50.0)) over w1 + (last_value(p_retailprice) over w1),2) +from part +window w1 as (distribute by p_mfgr sort by p_retailprice) +limit 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### 
+Manufacturer#1 1173.15 2 true +Manufacturer#1 1173.15 2 true +Manufacturer#1 1414.42 28 true +Manufacturer#1 1602.59 6 true +Manufacturer#1 1632.66 42 true +Manufacturer#1 1753.76 34 true +Manufacturer#2 1690.68 14 true +Manufacturer#2 1698.66 25 true +Manufacturer#2 1701.6 18 true +Manufacturer#2 1800.7 40 true +Manufacturer#2 2031.98 2 true diff --git ql/src/test/results/clientpositive/tez/temp_table.q.out ql/src/test/results/clientpositive/tez/temp_table.q.out new file mode 100644 index 0000000..a5b7e77 --- /dev/null +++ ql/src/test/results/clientpositive/tez/temp_table.q.out @@ -0,0 +1,403 @@ +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 0) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: foo + isTemporary: true + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@foo +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 1) (type: boolean) + 
Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: bar + isTemporary: true + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bar +PREHOOK: query: DESCRIBE foo +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@foo +POSTHOOK: query: DESCRIBE foo +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@foo +key string +value string +PREHOOK: query: DESCRIBE bar +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@bar +POSTHOOK: query: DESCRIBE bar +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@bar +key string +value string +PREHOOK: query: explain select * from foo limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from foo limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: foo + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select * from foo limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from foo limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +238 val_238 +86 val_86 +278 val_278 +98 val_98 +484 val_484 +150 val_150 +224 val_224 +66 val_66 +128 val_128 +146 val_146 +PREHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Union 2 (CONTAINS) + Map 4 <- Union 2 (CONTAINS) + Reducer 3 <- Union 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: bar + Select Operator + 
expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + value expressions: _col1 (type: string) + Map 4 + Map Operator Tree: + TableScan + alias: foo + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + value expressions: _col1 (type: string) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 2 + Vertex: Union 2 + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bar +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bar +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@baz +PREHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +PREHOOK: Output: default@baz +POSTHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +POSTHOOK: Output: default@baz +POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TEMPORARY TABLE bay (key string, value string) STORED AS orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: CREATE TEMPORARY TABLE bay (key string, value string) STORED AS orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bay +PREHOOK: query: select * from bay +PREHOOK: type: QUERY +PREHOOK: Input: default@bay +#### A masked pattern was here #### +POSTHOOK: query: select * from bay +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bay +#### A masked pattern was here #### +PREHOOK: query: INSERT OVERWRITE TABLE bay SELECT * FROM src ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: 
default@bay +POSTHOOK: query: INSERT OVERWRITE TABLE bay SELECT * FROM src ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bay +POSTHOOK: Lineage: bay.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: bay.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from bay limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bay +#### A masked pattern was here #### +POSTHOOK: query: select * from bay limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bay +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +alltypesorc +bar +bay +baz +foo +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: CREATE DATABASE two +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: CREATE DATABASE two +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE two +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE two +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@foo +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@foo +POSTHOOK: Output: two@foo +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +foo +PREHOOK: query: use default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE two CASCADE +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:two +PREHOOK: Output: database:two +PREHOOK: Output: two@foo +POSTHOOK: query: DROP DATABASE two CASCADE +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:two +POSTHOOK: Output: database:two +POSTHOOK: Output: two@foo +PREHOOK: query: DROP TABLE bay +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bay +PREHOOK: Output: default@bay +POSTHOOK: query: DROP TABLE bay +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bay +POSTHOOK: Output: default@bay
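
Editor's note on the windowing golden output above: the expected results exercise three behaviors worth calling out when reviewing them — offset functions with and without an explicit default, a running aggregate under an explicit ROWS frame, and a named WINDOW clause. The following is a condensed restatement of the queries as they appear in the golden files (same over10k and part test tables and column names; the comments describe what the expected output above demonstrates, not new behavior):

    -- lead(i, 3, 0): the value of i three rows ahead within the partition.
    -- For the last three rows of each partition no such row exists, so the
    -- default 0 is used and i - lead(i, 3, 0) degenerates to i itself
    -- (visible above as rows like "david davidson 65778 65778").
    SELECT s, i,
           i - LEAD(i, 3, 0) OVER (PARTITION BY si ORDER BY i, s)
    FROM over10k LIMIT 100;

    -- lag(d, 3) with no default: the first three rows of each partition
    -- have no row three back, so the expression evaluates to NULL
    -- (the leading NULLs in the expected output above).
    SELECT s, si, d,
           si - LAG(d, 3) OVER (PARTITION BY b ORDER BY si, s, d)
    FROM over10k LIMIT 100;

    -- An explicit ROWS frame turns avg() into a running average: each row
    -- averages p_retailprice from the start of its partition through itself,
    -- which is why Manufacturer#1's values drift row by row above.
    SELECT p_mfgr,
           AVG(p_retailprice) OVER (PARTITION BY p_mfgr ORDER BY p_type, p_mfgr
                                    ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
    FROM part;

    -- A named window: w1 is defined once in the WINDOW clause and shared
    -- by several window functions in the same select list.
    SELECT p_mfgr, p_retailprice, p_size,
           SUM(p_retailprice) OVER w1,
           LAST_VALUE(p_retailprice) OVER w1
    FROM part
    WINDOW w1 AS (DISTRIBUTE BY p_mfgr SORT BY p_retailprice)
    LIMIT 11;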
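
Editor's note on temp_table.q.out: this golden file walks the temporary-table surface end to end, and its EXPLAIN output confirms the plan carries isTemporary: true in the Create Table Operator. A condensed sketch of the statement sequence it verifies (same names as the expected output; all of these tables are session-scoped, so they vanish when the session ends):

    -- CTAS variants, each planned with isTemporary: true.
    CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0;
    CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1;

    -- Temporary tables also support LIKE, explicit schemas, and
    -- non-default storage formats, and accept INSERT OVERWRITE.
    CREATE TEMPORARY TABLE baz LIKE foo;
    CREATE TEMPORARY TABLE bay (key string, value string) STORED AS orc;
    INSERT OVERWRITE TABLE bay SELECT * FROM src ORDER BY key;

    -- SHOW TABLES lists temporary tables alongside permanent ones, and
    -- each database is its own namespace: two.foo below is a distinct
    -- temporary table from default.foo.
    CREATE DATABASE two;
    USE two;
    CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo;
    SHOW TABLES;               -- in database two, only foo is listed
    USE default;
    DROP DATABASE two CASCADE; -- also drops the temporary two.foo
    DROP TABLE bay;            -- explicit drop works before session end

Note the cross-database case at the end: the golden output shows the CTAS in database two reading from default@foo and writing two@foo, so a temporary table can shadow a same-named table only within its own database.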