diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index 1a19610..00b2df4 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -39,7 +39,7 @@ stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q,file_with_header_footer_negative.q,udf_local_resource.q tez_fsstat.q,mapjoin_decimal.q,tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q,tez_union.q,bucket_map_join_tez1.q,bucket_map_join_tez2.q,tez_schema_evolution.q - cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q + temp_table.q,cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q
add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index cc802c6..d425d2b 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -230,6 +230,7 @@ struct Table { 11: string viewExpandedText, // expanded view text, null for non-view 12: string tableType, // table type enum, e.g. 
EXTERNAL_TABLE 13: optional PrincipalPrivilegeSet privileges, + 14: optional bool temporary=false } struct Partition { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 8014f2a..a6cd09a 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -2455,8 +2455,8 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) { swap(a.__isset, b.__isset); } -const char* Table::ascii_fingerprint = "68640B4B66B355CF317429AF70D2C260"; -const uint8_t Table::binary_fingerprint[16] = {0x68,0x64,0x0B,0x4B,0x66,0xB3,0x55,0xCF,0x31,0x74,0x29,0xAF,0x70,0xD2,0xC2,0x60}; +const char* Table::ascii_fingerprint = "29EFB2A5970EF572039E5D94CC78AA85"; +const uint8_t Table::binary_fingerprint[16] = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85}; uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { @@ -2609,6 +2609,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 14: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->temporary); + this->__isset.temporary = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -2695,6 +2703,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.temporary) { + xfer += oprot->writeFieldBegin("temporary", ::apache::thrift::protocol::T_BOOL, 14); + xfer += oprot->writeBool(this->temporary); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -2715,6 +2728,7 @@ void swap(Table &a, Table &b) { swap(a.viewExpandedText, b.viewExpandedText); swap(a.tableType, b.tableType); swap(a.privileges, b.privileges); + swap(a.temporary, b.temporary); swap(a.__isset, b.__isset); } diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 413256f..a0f208a 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -1332,7 +1332,7 @@ class StorageDescriptor { void swap(StorageDescriptor &a, StorageDescriptor &b); typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {} bool tableName; bool dbName; bool owner; @@ -1346,15 +1346,16 @@ typedef struct _Table__isset { bool viewExpandedText; bool tableType; bool privileges; + bool temporary; } _Table__isset; class Table { public: - static const char* ascii_fingerprint; // = "68640B4B66B355CF317429AF70D2C260"; - static const uint8_t binary_fingerprint[16]; // = {0x68,0x64,0x0B,0x4B,0x66,0xB3,0x55,0xCF,0x31,0x74,0x29,0xAF,0x70,0xD2,0xC2,0x60}; + static const char* ascii_fingerprint; // = "29EFB2A5970EF572039E5D94CC78AA85"; + static const uint8_t 
binary_fingerprint[16]; // = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85}; - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType() { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) { } virtual ~Table() throw() {} @@ -1372,6 +1373,7 @@ class Table { std::string viewExpandedText; std::string tableType; PrincipalPrivilegeSet privileges; + bool temporary; _Table__isset __isset; @@ -1428,6 +1430,11 @@ class Table { __isset.privileges = true; } + void __set_temporary(const bool val) { + temporary = val; + __isset.temporary = true; + } + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName)) @@ -1458,6 +1465,10 @@ class Table { return false; else if (__isset.privileges && !(privileges == rhs.privileges)) return false; + if (__isset.temporary != rhs.__isset.temporary) + return false; + else if (__isset.temporary && !(temporary == rhs.temporary)) + return false; return true; } bool operator != (const Table &rhs) const { diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 1e7fca3..229a819 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -47,6 +47,7 @@ private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", org.apache.thrift.protocol.TType.STRING, (short)11); private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13); + private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -67,6 +68,7 @@ private String viewExpandedText; // required private String tableType; // required private PrincipalPrivilegeSet privileges; // optional + private boolean temporary; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -82,7 +84,8 @@ VIEW_ORIGINAL_TEXT((short)10, "viewOriginalText"), VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"), TABLE_TYPE((short)12, "tableType"), - PRIVILEGES((short)13, "privileges"); + PRIVILEGES((short)13, "privileges"), + TEMPORARY((short)14, "temporary"); private static final Map byName = new HashMap(); @@ -123,6 +126,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_TYPE; case 13: // PRIVILEGES return PRIVILEGES; + case 14: // TEMPORARY + return TEMPORARY; default: return null; } @@ -166,8 +171,9 @@ public String getFieldName() { private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; private static final int __RETENTION_ISSET_ID = 2; + private static final int __TEMPORARY_ISSET_ID = 3; private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.PRIVILEGES}; + private _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -200,11 +206,15 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); + tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } public Table() { + this.temporary = false; + } public Table( @@ -293,6 +303,7 @@ public Table(Table other) { if (other.isSetPrivileges()) { this.privileges = new PrincipalPrivilegeSet(other.privileges); } + this.temporary = other.temporary; } public Table deepCopy() { @@ -317,6 +328,8 @@ public void clear() { this.viewExpandedText = null; this.tableType = null; this.privileges = null; + this.temporary = false; + } public String getTableName() { @@ -641,6 +654,28 @@ public void setPrivilegesIsSet(boolean value) { } } + public boolean isTemporary() { + return this.temporary; + } + + public void setTemporary(boolean temporary) { + this.temporary = temporary; + setTemporaryIsSet(true); + } + + public void unsetTemporary() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TEMPORARY_ISSET_ID); + } + + /** Returns true if field temporary is set (has been assigned a value) and false otherwise */ + public boolean isSetTemporary() { + return EncodingUtils.testBit(__isset_bitfield, __TEMPORARY_ISSET_ID); + } + + public void setTemporaryIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -747,6 +782,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case TEMPORARY: + if (value == null) { + unsetTemporary(); + } else { + setTemporary((Boolean)value); + } + break; + } } @@ -791,6 +834,9 @@ public Object 
getFieldValue(_Fields field) { case PRIVILEGES: return getPrivileges(); + case TEMPORARY: + return Boolean.valueOf(isTemporary()); + } throw new IllegalStateException(); } @@ -828,6 +874,8 @@ public boolean isSet(_Fields field) { return isSetTableType(); case PRIVILEGES: return isSetPrivileges(); + case TEMPORARY: + return isSetTemporary(); } throw new IllegalStateException(); } @@ -962,6 +1010,15 @@ public boolean equals(Table that) { return false; } + boolean this_present_temporary = true && this.isSetTemporary(); + boolean that_present_temporary = true && that.isSetTemporary(); + if (this_present_temporary || that_present_temporary) { + if (!(this_present_temporary && that_present_temporary)) + return false; + if (this.temporary != that.temporary) + return false; + } + return true; } @@ -1034,6 +1091,11 @@ public int hashCode() { if (present_privileges) builder.append(privileges); + boolean present_temporary = true && (isSetTemporary()); + builder.append(present_temporary); + if (present_temporary) + builder.append(temporary); + return builder.toHashCode(); } @@ -1175,6 +1237,16 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetTemporary()).compareTo(typedOther.isSetTemporary()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTemporary()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.temporary, typedOther.temporary); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1288,6 +1360,12 @@ public String toString() { } first = false; } + if (isSetTemporary()) { + if (!first) sb.append(", "); + sb.append("temporary:"); + sb.append(this.temporary); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1468,6 +1546,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 14: // TEMPORARY + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.temporary = iprot.readBool(); + struct.setTemporaryIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1557,6 +1643,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldEnd(); } } + if (struct.isSetTemporary()) { + oprot.writeFieldBegin(TEMPORARY_FIELD_DESC); + oprot.writeBool(struct.temporary); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1614,7 +1705,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetPrivileges()) { optionals.set(12); } - oprot.writeBitSet(optionals, 13); + if (struct.isSetTemporary()) { + optionals.set(13); + } + oprot.writeBitSet(optionals, 14); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1667,12 +1761,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetPrivileges()) { struct.privileges.write(oprot); } + if (struct.isSetTemporary()) { + oprot.writeBool(struct.temporary); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(13); + BitSet incoming = iprot.readBitSet(14); if (incoming.get(0)) { struct.tableName = iprot.readString(); 
struct.setTableNameIsSet(true); @@ -1748,6 +1845,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.privileges.read(iprot); struct.setPrivilegesIsSet(true); } + if (incoming.get(13)) { + struct.temporary = iprot.readBool(); + struct.setTemporaryIsSet(true); + } } } diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 46f6a04..3db3ded 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -3162,6 +3162,7 @@ class Table { public $viewExpandedText = null; public $tableType = null; public $privileges = null; + public $temporary = false; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -3233,6 +3234,10 @@ class Table { 'type' => TType::STRUCT, 'class' => '\metastore\PrincipalPrivilegeSet', ), + 14 => array( + 'var' => 'temporary', + 'type' => TType::BOOL, + ), ); } if (is_array($vals)) { @@ -3275,6 +3280,9 @@ class Table { if (isset($vals['privileges'])) { $this->privileges = $vals['privileges']; } + if (isset($vals['temporary'])) { + $this->temporary = $vals['temporary']; + } } } @@ -3414,6 +3422,13 @@ class Table { $xfer += $input->skip($ftype); } break; + case 14: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->temporary); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -3523,6 +3538,11 @@ class Table { $xfer += $this->privileges->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->temporary !== null) { + $xfer += $output->writeFieldBegin('temporary', TType::BOOL, 14); + $xfer += $output->writeBool($this->temporary); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index b3eeb89..43a498a 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -2153,6 +2153,7 @@ class Table: - viewExpandedText - tableType - privileges + - temporary """ thrift_spec = ( @@ -2170,9 +2171,10 @@ class Table: (11, TType.STRING, 'viewExpandedText', None, None, ), # 11 (12, TType.STRING, 'tableType', None, None, ), # 12 (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13 + (14, TType.BOOL, 'temporary', None, False, ), # 14 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None,): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -2186,6 +2188,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.viewExpandedText = viewExpandedText self.tableType = tableType self.privileges = privileges + self.temporary = temporary def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and 
self.thrift_spec is not None and fastbinary is not None: @@ -2275,6 +2278,11 @@ def read(self, iprot): self.privileges.read(iprot) else: iprot.skip(ftype) + elif fid == 14: + if ftype == TType.BOOL: + self.temporary = iprot.readBool(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2344,6 +2352,10 @@ def write(self, oprot): oprot.writeFieldBegin('privileges', TType.STRUCT, 13) self.privileges.write(oprot) oprot.writeFieldEnd() + if self.temporary is not None: + oprot.writeFieldBegin('temporary', TType.BOOL, 14) + oprot.writeBool(self.temporary) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 757461f..feb99db 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -531,6 +531,7 @@ class Table VIEWEXPANDEDTEXT = 11 TABLETYPE = 12 PRIVILEGES = 13 + TEMPORARY = 14 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -545,7 +546,8 @@ class Table VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'}, VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, - PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true} + PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, + TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true} } def struct_fields; FIELDS; end diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 664dccd..a6fb71d 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -139,7 +139,7 @@ private boolean isConnected = false; private URI metastoreUris[]; private final HiveMetaHookLoader hookLoader; - private final HiveConf conf; + protected final HiveConf conf; private String tokenStrForm; private final boolean localMetaStore; @@ -147,7 +147,7 @@ private int retries = 5; private int retryDelaySeconds = 0; - static final private Log LOG = LogFactory.getLog("hive.metastore"); + static final protected Log LOG = LogFactory.getLog("hive.metastore"); public HiveMetaStoreClient(HiveConf conf) throws MetaException { @@ -555,7 +555,8 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws Already } boolean success = false; try { - client.create_table_with_environment_context(tbl, envContext); + // Subclasses can override this step (for example, for temporary tables) + create_table_with_environment_context(tbl, envContext); if (hook != null) { hook.commitCreateTable(tbl); } @@ -617,7 +618,8 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD List tableList = getAllTables(name); for (String table : tableList) { try { - dropTable(name, table, deleteData, false); + // Subclasses can override this step (for example, for temporary tables) + dropTable(name, table, deleteData, false); } catch (UnsupportedOperationException e) { // Ignore Index tables, those will be dropped with parent tables } @@ -771,7 +773,7 
@@ public void dropTable(String dbname, String name, boolean deleteData, } boolean success = false; try { - client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + drop_table_with_environment_context(dbname, name, deleteData, envContext); if (hook != null) { hook.commitDropTable(tbl, deleteData); } @@ -1342,7 +1344,7 @@ private Database deepCopy(Database database) { return copy; } - private Table deepCopy(Table table) { + protected Table deepCopy(Table table) { Table copy = null; if (table != null) { copy = new Table(table); @@ -1727,4 +1729,15 @@ public Function getFunction(String dbName, String funcName) return client.get_functions(dbName, pattern); } + protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + client.create_table_with_environment_context(tbl, envContext); + } + + protected void drop_table_with_environment_context(String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index c1790b4..b411169 100755 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -105,7 +105,7 @@ private MetaStoreFS getMetaStoreFsHandler(Configuration conf) /** * Helper functions to convert IOException to MetaException */ - public FileSystem getFs(Path f) throws MetaException { + public static FileSystem getFs(Path f, Configuration conf) throws MetaException { try { return f.getFileSystem(conf); } catch (IOException e) { @@ -114,6 +114,10 @@ public FileSystem getFs(Path f) throws MetaException { return null; } + public FileSystem getFs(Path f) throws MetaException { + return getFs(f, conf); + } + public static void closeFs(FileSystem fs) throws MetaException { try { if (fs != null) { @@ -138,12 +142,16 @@ public static void closeFs(FileSystem fs) throws MetaException { * Path to be canonicalized * @return Path with canonical scheme and authority */ - public Path getDnsPath(Path path) throws MetaException { - FileSystem fs = getFs(path); + public static Path getDnsPath(Path path, Configuration conf) throws MetaException { + FileSystem fs = getFs(path, conf); return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path .toUri().getPath())); } + public Path getDnsPath(Path path) throws MetaException { + return getDnsPath(path, conf); + } + /** * Resolve the configured warehouse root dir with respect to the configuration * This involves opening the FileSystem corresponding to the warehouse root @@ -177,7 +185,6 @@ public Path getDefaultDatabasePath(String dbName) throws MetaException { return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX); } - public Path getTablePath(Database db, String tableName) throws MetaException { return getDnsPath(new Path(getDatabasePath(db), tableName.toLowerCase())); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index abc4290..4daefa7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java 
@@ -73,13 +73,13 @@ private final Map pathToCS = new ConcurrentHashMap(); // scratch path to use for all non-local (ie. hdfs) file system tmp folders - private final Path nonLocalScratchPath; + private Path nonLocalScratchPath; // scratch directory to use for local file system tmp folders - private final String localScratchDir; + private String localScratchDir; // the permission to scratch directory (local and hdfs) - private final String scratchDirPermission; + private String scratchDirPermission; // Keeps track of scratch directories created for different scheme/authority private final Map fsScratchDirs = new HashMap(); @@ -124,12 +124,9 @@ public Context(Configuration conf, String executionId) { // local & non-local tmp location is configurable. however it is the same across // all external file systems - nonLocalScratchPath = - new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR), - executionId); - localScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), - executionId).toUri().getPath(); - scratchDirPermission= HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); + nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId); + localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath(); + scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index ede0cfd..ffd3948 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -4235,6 +4235,8 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { tbl.setSkewedColValues(crtTbl.getSkewedColValues()); } + tbl.getTTable().setTemporary(crtTbl.isTemporary()); + tbl.setStoredAsSubDirectories(crtTbl.isStoredAsSubDirectories()); tbl.setInputFormatClass(crtTbl.getInputFormat()); @@ -4382,6 +4384,8 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveExce params.putAll(crtTbl.getTblProps()); } + tbl.getTTable().setTemporary(crtTbl.isTemporary()); + if (crtTbl.isExternal()) { tbl.setProperty("EXTERNAL", "TRUE"); tbl.setTableType(TableType.EXTERNAL_TABLE); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index a988b44..56c829c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2488,7 +2488,7 @@ public HiveMetaHook getHook( } }; return RetryingMetaStoreClient.getProxy(conf, hookLoader, - HiveMetaStoreClient.class.getName()); + SessionHiveMetaStoreClient.class.getName()); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java new file mode 100644 index 0000000..1497007 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -0,0 +1,307 @@ +package org.apache.hadoop.hive.ql.metadata; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileSystem; 
+import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.HiveMetaHookLoader; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.thrift.TException; + +public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements IMetaStoreClient { + + SessionHiveMetaStoreClient(HiveConf conf) throws MetaException { + super(conf); + } + + SessionHiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) throws MetaException { + super(conf, hookLoader); + } + + private Warehouse wh = null; + + private Warehouse getWh() throws MetaException { + if (wh == null) { + wh = new Warehouse(conf); + } + return wh; + } + + @Override + protected void create_table_with_environment_context( + org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + + if (tbl.isTemporary()) { + createTempTable(tbl, envContext); + return; + } + // non-temp tables should use underlying client. + super.create_table_with_environment_context(tbl, envContext); + } + + @Override + protected void drop_table_with_environment_context(String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + // First try temp table + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name); + if (table != null) { + dropTempTable(table, deleteData, envContext); + return; + } + + // Try underlying client + super.drop_table_with_environment_context(dbname, name, deleteData, envContext); + } + + @Override + public org.apache.hadoop.hive.metastore.api.Table getTable(String dbname, String name) throws MetaException, + TException, NoSuchObjectException { + // First check temp tables + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name); + if (table != null) { + return deepCopy(table); // Original method used deepCopy(), do the same here.
+ } + + // Try underlying client + return super.getTable(dbname, name); + } + + @Override + public List getAllTables(String dbName) throws MetaException { + List tableNames = super.getAllTables(dbName); + + // May need to merge with list of temp tables + Map tables = SessionState.get().getTempTables().get(dbName); + if (tables == null || tables.size() == 0) { + return tableNames; + } + + // Get list of temp table names + Set tempTableNames = tables.keySet(); + + // Merge and sort result + Set allTableNames = new HashSet(tableNames.size() + tempTableNames.size()); + allTableNames.addAll(tableNames); + allTableNames.addAll(tempTableNames); + tableNames = new ArrayList(allTableNames); + Collections.sort(tableNames); + return tableNames; + } + + @Override + public List getTables(String dbName, String tablePattern) throws MetaException { + List tableNames = super.getTables(dbName, tablePattern); + + // May need to merge with list of temp tables + Map tables = SessionState.get().getTempTables().get(dbName); + if (tables == null || tables.size() == 0) { + return tableNames; + } + tablePattern = tablePattern.replaceAll("\\*", ".*"); + Pattern pattern = Pattern.compile(tablePattern); + Matcher matcher = pattern.matcher(""); + Set combinedTableNames = new HashSet(); + for (String tableName : tables.keySet()) { + if (matcher == null) { + matcher = pattern.matcher(tableName); + } else { + matcher.reset(tableName); + } + if (matcher.matches()) { + combinedTableNames.add(tableName); + } + } + + // Combine/sort temp and normal table results + combinedTableNames.addAll(tableNames); + tableNames = new ArrayList(combinedTableNames); + Collections.sort(tableNames); + return tableNames; + } + + @Override + public List getTableObjectsByName(String dbName, + List tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + + if (SessionState.get().getTempTables().size() == 0) { + // No temp tables, just call underlying client + return super.getTableObjectsByName(dbName, tableNames); + } + + List tables = + new ArrayList(); + for (String tableName : tableNames) { + try { + org.apache.hadoop.hive.metastore.api.Table table = getTable(dbName, tableName); + if (table != null) { + tables.add(table); + } + } catch (NoSuchObjectException err) { + // Ignore error, just return the valid tables that are found. 
+ } + } + return tables; + } + + + @Override + public boolean tableExists(String databaseName, String tableName) throws MetaException, + TException, UnknownDBException { + // First check temp tables + org.apache.hadoop.hive.metastore.api.Table table = getTempTable(databaseName, tableName); + if (table != null) { + return true; + } + + // Try underlying client + return super.tableExists(databaseName, tableName); + } + + public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, + EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { + // First try temp table + org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); + if (old_tbl != null) { + alterTempTable(dbname, tbl_name, old_tbl, new_tbl, envContext); + return; + } + + // Try underlying client + super.alter_table(dbname, tbl_name, new_tbl, envContext); + } + + private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl, + EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + + String dbName = tbl.getDbName(); + String tblName = tbl.getTableName(); + Map tables = SessionState.get().getTempTables().get(dbName); + if (tables != null && tables.containsKey(tblName)) { + throw new MetaException("Temporary table " + dbName + "." + tblName + " already exists"); + } + + // Create temp table directory + Warehouse wh = getWh(); + Path tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation())); + if (tblPath != null) { + if (!wh.isDir(tblPath)) { + if (!wh.mkdirs(tblPath, true)) { + throw new MetaException(tblPath + + " is not a directory or unable to create one"); + } + } + } else { + throw new MetaException("Temp table path not set for " + tbl.getTableName()); + } + + // Add temp table info to current session + Table tTable = new Table(tbl); + if (tables == null) { + tables = new HashMap(); + SessionState.get().getTempTables().put(dbName, tables); + } + tables.put(tblName, tTable); + } + + private org.apache.hadoop.hive.metastore.api.Table getTempTable(String dbName, String tableName) { + Map tables = SessionState.get().getTempTables().get(dbName); + if (tables != null) { + Table table = tables.get(tableName); + if (table != null) { + return table.getTTable(); + } + } + return null; + } + + private void alterTempTable(String dbname, String tbl_name, + org.apache.hadoop.hive.metastore.api.Table oldt, + org.apache.hadoop.hive.metastore.api.Table newt, + EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { + Table newTable = new Table(newt); + + // Disallow changing temp table location + if (!newt.getSd().getLocation().equals(oldt.getSd().getLocation())) { + throw new MetaException("Temp table location cannot be changed"); + } + + String newDbName = newTable.getDbName(); + String newTableName = newTable.getTableName(); + if (!newDbName.equals(oldt.getDbName()) || !newTableName.equals(oldt.getTableName())) { + // Table was renamed - remove old temp table entry, and add new entry to list of temp tables. 
+ // Note that for temp tables there is no need to rename directories + SessionState.get().getTempTables().get(dbname).remove(tbl_name); + Map tables = SessionState.get().getTempTables().get(newDbName); + if (tables == null) { + tables = new HashMap(); + SessionState.get().getTempTables().put(newDbName, tables); + } + tables.put(newTableName, newTable); + } else { + SessionState.get().getTempTables().get(dbname).put(tbl_name, newTable); + } + } + + private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boolean deleteData, + EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + + String dbName = table.getDbName().toLowerCase(); + String tableName = table.getTableName().toLowerCase(); + + // Determine the temp table path + Path tablePath = null; + String pathStr = table.getSd().getLocation(); + if (pathStr != null) { + try { + tablePath = new Path(table.getSd().getLocation()); + if (!getWh().isWritable(tablePath.getParent())) { + throw new MetaException("Table metadata not deleted since " + tablePath.getParent() + + " is not writable by " + conf.getUser()); + } + } catch (IOException err) { + throw new MetaException(err.getMessage()); + } + } + + SessionState.get().getTempTables().get(dbName).remove(tableName); + + // Delete table data + if (deleteData && !MetaStoreUtils.isExternalTable(table)) { + try { + getWh().deleteDir(tablePath, true); + } catch (Exception err) { + LOG.error("Failed to delete temp table directory: " + tablePath + " " + err.getMessage()); + // Forgive error + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 3df2690..563dbd1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -996,4 +996,8 @@ public String getCompleteName() { throw new RuntimeException("Cannot get path ", e); } } + + public boolean isTemporary() { + return tTable.isTemporary(); + } }; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 6d958fd..b765d29 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -804,7 +804,7 @@ databaseComment createTableStatement @init { pushMsg("create table statement", state); } @after { popMsg(state); } - : KW_CREATE (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName + : KW_CREATE (temp=KW_TEMPORARY)? (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName ( like=KW_LIKE likeName=tableName tableLocation? tablePropertiesPrefixed? @@ -819,7 +819,7 @@ createTableStatement tablePropertiesPrefixed? (KW_AS selectStatementWithCTE)? ) - -> ^(TOK_CREATETABLE $name $ext? ifNotExists? + -> ^(TOK_CREATETABLE $name $temp? $ext? ifNotExists? ^(TOK_LIKETABLE $likeName?) columnNameTypeList? tableComment? 
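Reviewer note: the getTables() override in SessionHiveMetaStoreClient above filters session temp tables by rewriting the metastore's glob-style pattern (where '*' is the wildcard) into a Java regex before matching. A minimal, self-contained sketch of just that matching step, with made-up table names (illustrative only, not part of this patch):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TablePatternSketch {
  public static void main(String[] args) {
    // Rewrite the metastore wildcard '*' to the regex '.*', as getTables() does,
    // then reuse a single Matcher across the candidate table names.
    Pattern pattern = Pattern.compile("tmp_*".replaceAll("\\*", ".*"));
    Matcher matcher = pattern.matcher("");
    matcher.reset("tmp_example");
    System.out.println(matcher.matches()); // true
    matcher.reset("other_table");
    System.out.println(matcher.matches()); // false
  }
}
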
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 71471f4..83b6304 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -92,6 +92,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { table.getTableName(), false, // isExternal: set to false here, can be overwritten by the // IMPORT stmt + table.isTemporary(), table.getSd().getCols(), table.getPartitionKeys(), table.getSd().getBucketCols(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 28d0e1c..aa98a4d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.parse; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX; import java.io.IOException; import java.io.Serializable; @@ -31,6 +32,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.Map.Entry; import java.util.Set; import java.util.TreeSet; @@ -9790,6 +9792,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) Map tblProps = null; boolean ifNotExists = false; boolean isExt = false; + boolean isTemporary = false; ASTNode selectStmt = null; final int CREATE_TABLE = 0; // regular CREATE TABLE final int CTLT = 1; // CREATE TABLE LIKE ... (CTLT) @@ -9826,6 +9829,9 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) case HiveParser.KW_EXTERNAL: isExt = true; break; + case HiveParser.KW_TEMPORARY: + isTemporary = true; + break; case HiveParser.TOK_LIKETABLE: if (child.getChildCount() > 0) { likeTableName = getUnescapedName((ASTNode) child.getChild(0)); @@ -9949,6 +9955,21 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0]; Database database = getDatabase(dbName); outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED)); + + if (isTemporary && location == null) { + // for temporary tables we set the location to something in the session's scratch dir + // it has the same life cycle as the tmp table + try { + // Generate a unique ID for temp table path. + // This path will be fixed for the life of the temp table. 
+ Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString()); + path = Warehouse.getDnsPath(path, conf); + location = path.toString(); + } catch (MetaException err) { + throw new SemanticException(err); + } + } + // Handle different types of CREATE TABLE command CreateTableDesc crtTblDesc = null; switch (command_type) { @@ -9956,7 +9977,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) case CREATE_TABLE: // REGULAR CREATE TABLE DDL tblProps = addDefaultProperties(tblProps); - crtTblDesc = new CreateTableDesc(tableName, isExt, cols, partCols, + crtTblDesc = new CreateTableDesc(tableName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, @@ -9978,7 +9999,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) case CTLT: // create table like tblProps = addDefaultProperties(tblProps); - CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, + CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, isTemporary, storageFormat.inputFormat, storageFormat.outputFormat, location, shared.serde, shared.serdeProps, tblProps, ifNotExists, likeTableName); SessionState.get().setCommandType(HiveOperation.CREATETABLE); @@ -10000,7 +10021,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) tblProps = addDefaultProperties(tblProps); - crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, cols, partCols, + crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index 2537b75..52a8096 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -77,11 +77,12 @@ List skewedColNames; List> skewedColValues; boolean isStoredAsSubDirectories = false; + boolean isTemporary = false; public CreateTableDesc() { } - public CreateTableDesc(String databaseName, String tableName, boolean isExternal, + public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -92,7 +93,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal Map tblProps, boolean ifNotExists, List skewedColNames, List> skewedColValues) { - this(tableName, isExternal, cols, partCols, + this(tableName, isExternal, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serName, storageHandler, serdeProps, @@ -101,7 +102,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal this.databaseName = databaseName; } - public CreateTableDesc(String tableName, boolean isExternal, + public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary, List cols, List partCols, List bucketCols, List sortCols, int numBuckets, String fieldDelim, String fieldEscape, String collItemDelim, @@ -113,6 +114,7 @@ 
public CreateTableDesc(String tableName, boolean isExternal, boolean ifNotExists, List skewedColNames, List> skewedColValues) { this.tableName = tableName; this.isExternal = isExternal; + this.isTemporary = isTemporary; this.bucketCols = new ArrayList(bucketCols); this.sortCols = new ArrayList(sortCols); this.collItemDelim = collItemDelim; @@ -535,4 +537,19 @@ public void setNullFormat(String nullFormat) { this.nullFormat = nullFormat; } + /** + * @return the isTemporary + */ + @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) + public boolean isTemporary() { + return isTemporary; + } + + /** + * @param isTemporary table is Temporary or not. + */ + public void setTemporary(boolean isTemporary) { + this.isTemporary = isTemporary; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java index cb5d64c..c9ce30f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java @@ -38,16 +38,18 @@ Map tblProps; boolean ifNotExists; String likeTableName; + boolean isTemporary = false; public CreateTableLikeDesc() { } - public CreateTableLikeDesc(String tableName, boolean isExternal, + public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary, String defaultInputFormat, String defaultOutputFormat, String location, String defaultSerName, Map defaultSerdeProps, Map tblProps, boolean ifNotExists, String likeTableName) { this.tableName = tableName; this.isExternal = isExternal; + this.isTemporary = isTemporary; this.defaultInputFormat=defaultInputFormat; this.defaultOutputFormat=defaultOutputFormat; this.defaultSerName=defaultSerName; @@ -168,4 +170,20 @@ public void setLikeTableName(String likeTableName) { public void setTblProps(Map tblProps) { this.tblProps = tblProps; } + + /** + * @return the isTemporary + */ + @Explain(displayName = "isTemporary", displayOnlyOnTrue = true) + public boolean isTemporary() { + return isTemporary; + } + + /** + * @param isTemporary table is Temporary or not. 
+ */ + public void setTemporary(boolean isTemporary) { + this.isTemporary = isTemporary; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 7feba1d..d3f8fb9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -56,6 +57,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; @@ -76,6 +78,12 @@ public class SessionState { private static final Log LOG = LogFactory.getLog(SessionState.class); + public static final String TMP_PREFIX = "_tmp_space.db"; + public static final String LOCAL_SESSION_PATH_KEY = "_hive.local.session.path"; + public static final String HDFS_SESSION_PATH_KEY = "_hive.hdfs.session.path"; + public static final String TMP_TABLE_SPACE_KEY = "_hive.tmp_table_space"; + private Map> tempTables = new HashMap>(); + protected ClassLoader parentLoader; /** @@ -175,6 +183,15 @@ private final String userName; + // scratch path to use for all non-local (ie. hdfs) file system tmp folders + private Path hdfsSessionPath; + + // sub dir of hdfs session path. used to keep tmp tables + private Path hdfsTmpTableSpace; + + // scratch directory to use for local file system tmp folders + private Path localSessionPath; + /** * Get the lineage state stored in this session. 
* @@ -342,6 +359,7 @@ public static SessionState start(SessionState startSs) { Hive.get(new HiveConf(startSs.conf)).getMSC(); ShimLoader.getHadoopShims().getUGIForConf(startSs.conf); FileSystem.get(startSs.conf); + startSs.createSessionPaths(startSs.conf); } catch (Exception e) { // catch-all due to some exec time dependencies on session state // that would cause ClassNoFoundException otherwise @@ -364,6 +382,79 @@ public static SessionState start(SessionState startSs) { return startSs; } + public static Path getLocalSessionPath(Configuration conf) { + SessionState ss = SessionState.get(); + if (ss != null) { + return ss.localSessionPath; + } else { + return new Path(conf.get(LOCAL_SESSION_PATH_KEY)); + } + } + + public static Path getHDFSSessionPath(Configuration conf) { + SessionState ss = SessionState.get(); + if (ss != null) { + return ss.hdfsSessionPath; + } else { + return new Path(conf.get(HDFS_SESSION_PATH_KEY)); + } + } + + public static Path getTempTableSpace(Configuration conf) { + SessionState ss = SessionState.get(); + if (ss != null) { + return ss.getTempTableSpace(); + } else { + return new Path(conf.get(TMP_TABLE_SPACE_KEY)); + } + } + + public Path getTempTableSpace() { + return this.hdfsTmpTableSpace; + } + + private void dropSessionPaths(Configuration conf) throws IOException { + hdfsSessionPath.getFileSystem(conf).delete(hdfsSessionPath, true); + localSessionPath.getFileSystem(conf).delete(localSessionPath, true); + } + + private void createSessionPaths(Configuration conf) throws IOException { + + String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); + String sessionId = getSessionId(); + + // local & non-local tmp location is configurable. however it is the same across + // all external file systems + hdfsSessionPath = + new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR), + sessionId); + createPath(conf, hdfsSessionPath, scratchDirPermission); + conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString()); + + localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), + sessionId); + createPath(conf, localSessionPath, scratchDirPermission); + conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString()); + hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX); + createPath(conf, hdfsTmpTableSpace, scratchDirPermission); + conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString()); + } + + private void createPath(Configuration conf, Path p, String perm) throws IOException { + FileSystem fs = p.getFileSystem(conf); + p = new Path(fs.makeQualified(p).toString()); + FsPermission fsPermission = new FsPermission(Short.parseShort(perm.trim(), 8)); + + if (!Utilities.createDirsWithPermission(conf, p, fsPermission)) { + throw new IOException("Cannot make directory: " + + p.toString()); + } + + // best effort to clean up if we don't shut down properly + fs.deleteOnExit(p); + } + + /** * Setup authentication and authorization plugins for this session. 
* @param startSs @@ -932,6 +1023,8 @@ public void close() throws IOException { } finally { tezSessionState = null; } + + dropSessionPaths(conf); } public AuthorizationMode getAuthorizationMode(){ @@ -1012,4 +1105,12 @@ public boolean hasAddedResource() { public void setAddedResource(boolean addedResouce) { this.addedResource = addedResouce; } + + public Map<String, Map<String, Table>> getTempTables() { + return tempTables; + } + + public void setTempTables(Map<String, Map<String, Table>> tempTables) { + this.tempTables = tempTables; + } } diff --git a/ql/src/test/queries/clientpositive/temp_table.q b/ql/src/test/queries/clientpositive/temp_table.q new file mode 100644 index 0000000..c6cdfbc --- /dev/null +++ b/ql/src/test/queries/clientpositive/temp_table.q @@ -0,0 +1,40 @@ +EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0; +CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0; + +EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1; +CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1; + +DESCRIBE foo; +DESCRIBE bar; + +explain select * from foo limit 10; +select * from foo limit 10; + +explain select * from (select * from foo union all select * from bar) u order by key limit 10; +select * from (select * from foo union all select * from bar) u order by key limit 10; + +CREATE TEMPORARY TABLE baz LIKE foo; + +INSERT OVERWRITE TABLE baz SELECT * from foo; + +CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key; + +select * from bay limit 10; + +SHOW TABLES; + +CREATE DATABASE two; + +USE two; + +SHOW TABLES; + +CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo; + +SHOW TABLES; + +use default; + +DROP DATABASE two CASCADE; + +DROP TABLE bay; diff --git a/ql/src/test/results/clientpositive/temp_table.q.out b/ql/src/test/results/clientpositive/temp_table.q.out new file mode 100644 index 0000000..b779971 --- /dev/null +++ b/ql/src/test/results/clientpositive/temp_table.q.out @@ -0,0 +1,457 @@ +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-8 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-8 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 0) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here
#### + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-8 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: foo + isTemporary: true + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@foo +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-8 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-8 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 1) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-8 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: bar + isTemporary: true + + Stage: Stage-2 + 
Stats-Aggr Operator + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bar +PREHOOK: query: DESCRIBE foo +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@foo +POSTHOOK: query: DESCRIBE foo +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@foo +key string +value string +PREHOOK: query: DESCRIBE bar +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@bar +POSTHOOK: query: DESCRIBE bar +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@bar +key string +value string +PREHOOK: query: explain select * from foo limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from foo limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: foo + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select * from foo limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from foo limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +238 val_238 +86 val_86 +278 val_278 +98 val_98 +484 val_484 +150 val_150 +224 val_224 +66 val_66 +128 val_128 +146 val_146 +PREHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: bar + Statistics: Num rows: 253 Data size: 2703 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 253 Data size: 2703 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: 
Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string), _col1 (type: string) + TableScan + alias: foo + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Union + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string), _col1 (type: string) + Reduce Operator Tree: + Extract + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bar +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bar +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@baz +PREHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +PREHOOK: Output: default@baz +POSTHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +POSTHOOK: Output: default@baz +POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bay +PREHOOK: query: select * from bay limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bay +#### A masked pattern was here 
#### +POSTHOOK: query: select * from bay limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bay +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +alltypesorc +bar +bay +baz +foo +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: CREATE DATABASE two +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: CREATE DATABASE two +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE two +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE two +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@foo +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@foo +POSTHOOK: Output: two@foo +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +foo +PREHOOK: query: use default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE two CASCADE +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:two +PREHOOK: Output: database:two +PREHOOK: Output: two@foo +POSTHOOK: query: DROP DATABASE two CASCADE +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:two +POSTHOOK: Output: database:two +POSTHOOK: Output: two@foo +PREHOOK: query: DROP TABLE bay +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bay +PREHOOK: Output: default@bay +POSTHOOK: query: DROP TABLE bay +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bay +POSTHOOK: Output: default@bay diff --git a/ql/src/test/results/clientpositive/tez/temp_table.q.out b/ql/src/test/results/clientpositive/tez/temp_table.q.out new file mode 100644 index 0000000..b3bd24b --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/temp_table.q.out @@ -0,0 +1,383 @@ +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 0) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.foo + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: foo + isTemporary: true + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@foo +PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2, Stage-0 + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 1) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bar + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Create Table Operator: + Create Table + columns: key string, value string + input format: org.apache.hadoop.mapred.TextInputFormat +#### A masked pattern was here #### + output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + name: bar + isTemporary: true + + Stage: Stage-3 + Stats-Aggr Operator + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bar +PREHOOK: query: DESCRIBE foo +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@foo +POSTHOOK: query: DESCRIBE foo +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@foo +key string +value string +PREHOOK: query: DESCRIBE bar +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@bar +POSTHOOK: query: DESCRIBE bar +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@bar +key string +value string +PREHOOK: query: explain select * from foo limit 10 +PREHOOK: type: QUERY 
+POSTHOOK: query: explain select * from foo limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: foo + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select * from foo limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from foo limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +238 val_238 +86 val_86 +278 val_278 +98 val_98 +484 val_484 +150 val_150 +224 val_224 +66 val_66 +128 val_128 +146 val_146 +PREHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Union 2 (CONTAINS) + Map 4 <- Union 2 (CONTAINS) + Reducer 3 <- Union 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: bar + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + value expressions: _col0 (type: string), _col1 (type: string) + Map 4 + Map Operator Tree: + TableScan + alias: foo + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + value expressions: _col0 (type: string), _col1 (type: string) + Reducer 3 + Reduce Operator Tree: + Extract + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 2 + Vertex: Union 2 + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bar +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bar +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 
+100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +POSTHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@baz +PREHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +PREHOOK: Output: default@baz +POSTHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +POSTHOOK: Output: default@baz +POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@bay +PREHOOK: query: select * from bay limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@bay +#### A masked pattern was here #### +POSTHOOK: query: select * from bay limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bay +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +alltypesorc +bar +bay +baz +foo +src +src1 +src_json +src_sequencefile +src_thrift +srcbucket +srcbucket2 +srcpart +PREHOOK: query: CREATE DATABASE two +PREHOOK: type: CREATEDATABASE +POSTHOOK: query: CREATE DATABASE two +POSTHOOK: type: CREATEDATABASE +PREHOOK: query: USE two +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: USE two +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@foo +POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@foo +POSTHOOK: Output: two@foo +PREHOOK: query: SHOW TABLES +PREHOOK: type: SHOWTABLES +POSTHOOK: query: SHOW TABLES +POSTHOOK: type: SHOWTABLES +foo +PREHOOK: query: use default +PREHOOK: type: SWITCHDATABASE +POSTHOOK: query: use default +POSTHOOK: type: SWITCHDATABASE +PREHOOK: query: DROP DATABASE two CASCADE +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:two +PREHOOK: Output: database:two +PREHOOK: Output: two@foo +POSTHOOK: query: DROP DATABASE two CASCADE +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:two +POSTHOOK: Output: database:two +POSTHOOK: Output: two@foo +PREHOOK: query: DROP TABLE bay +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bay +PREHOOK: Output: default@bay +POSTHOOK: query: DROP TABLE bay +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bay +POSTHOOK: Output: default@bay
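
For reference, a minimal sketch (not part of the patch) of how a caller might consult the session-local registry added above before falling back to the metastore. Temporary tables are tracked in SessionState's Map<String, Map<String, Table>> (database name -> table name -> Table) and their data lives under the per-session hdfsTmpTableSpace directory, which dropSessionPaths() deletes when the session closes; that is what makes them session-scoped. The resolveTempTable helper and the lower-casing of names below are illustrative assumptions, not code from the patch:

    import java.util.Map;

    import org.apache.hadoop.hive.ql.metadata.Table;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class TempTableLookupSketch {
      // Hypothetical helper, not part of the patch: return the session-local
      // temporary table registered under dbName.tableName, or null if none.
      public static Table resolveTempTable(String dbName, String tableName) {
        SessionState ss = SessionState.get();
        if (ss == null) {
          return null; // no active session, hence no temp tables
        }
        // tempTables maps database name -> (table name -> Table); the
        // lower-casing mirrors Hive's case-insensitive names (assumption).
        Map<String, Table> dbTables = ss.getTempTables().get(dbName.toLowerCase());
        return dbTables == null ? null : dbTables.get(tableName.toLowerCase());
      }
    }

Under this reading, a caller tries the registry first and only queries the metastore on a null result, which is consistent with the test output above: database two gets its own temporary foo (two@foo) that coexists with default@foo, and both disappear with the session rather than with a metastore DROP.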