diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index dcfe29a..0ac97c2 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -366,7 +366,7 @@
// Parameters to copy over when creating a table with Create Table Like.
DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", ""),
METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl",
- "org.apache.hadoop.hive.metastore.ObjectStore"),
+ "org.apache.hadoop.hive.metastore.SessionObjectStore"),
METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName",
"org.apache.derby.jdbc.EmbeddedDriver"),
METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index a75f569..6e64bb3 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -351,7 +351,7 @@
   <name>hive.metastore.rawstore.impl</name>
-  <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
+  <value>org.apache.hadoop.hive.metastore.SessionObjectStore</value>
   <description>Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. This class is used to store and retrieval of raw metadata objects such as table, database</description>
diff --git itests/qtest/pom.xml itests/qtest/pom.xml
index 1a19610..00b2df4 100644
--- itests/qtest/pom.xml
+++ itests/qtest/pom.xml
@@ -39,7 +39,7 @@
stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q,quotedid_smb.q,file_with_header_footer.q,external_table_with_space_in_location_path.q,root_dir_external_table.q,index_bitmap3.q,ql_rewrite_gbtoidx.q,index_bitmap_auto.q,udf_using.q
cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q,file_with_header_footer_negative.q,udf_local_resource.q
tez_fsstat.q,mapjoin_decimal.q,tez_join_tests.q,tez_joins_explain.q,mrr.q,tez_dml.q,tez_insert_overwrite_local_directory_1.q,tez_union.q,bucket_map_join_tez1.q,bucket_map_join_tez2.q,tez_schema_evolution.q
- cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q
+ temp_table.q,cross_product_check_1.q,cross_product_check_2.q,dynpart_sort_opt_vectorization.q,dynpart_sort_optimization.q,orc_analyze.q,join0.q,join1.q,auto_join0.q,auto_join1.q,bucket2.q,bucket3.q,bucket4.q,count.q,create_merge_compressed.q,cross_join.q,ctas.q,custom_input_output_format.q,disable_merge_for_bucketing.q,enforce_order.q,filter_join_breaktask.q,filter_join_breaktask2.q,groupby1.q,groupby2.q,groupby3.q,having.q,insert1.q,insert_into1.q,insert_into2.q,leftsemijoin.q,limit_pushdown.q,load_dyn_part1.q,load_dyn_part2.q,load_dyn_part3.q,mapjoin_mapjoin.q,mapreduce1.q,mapreduce2.q,merge1.q,merge2.q,metadata_only_queries.q,sample1.q,subquery_in.q,subquery_exists.q,vectorization_15.q,ptf.q,stats_counter.q,stats_noscan_1.q,stats_counter_partitioned.q,union2.q,union3.q,union4.q,union5.q,union6.q,union7.q,union8.q,union9.q,transform1.q,transform2.q,transform_ppr1.q,transform_ppr2.q,script_env_var1.q,script_env_var2.q,script_pipe.q,scriptfile1.q
add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rename.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_overwrite_partitions.q,insertexternal
1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q
diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift
index eef1b80..bc91bbb 100755
--- metastore/if/hive_metastore.thrift
+++ metastore/if/hive_metastore.thrift
@@ -230,6 +230,7 @@ struct Table {
11: string viewExpandedText, // expanded view text, null for non-view
12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
13: optional PrincipalPrivilegeSet privileges,
+ 14: optional bool temporary=false
}
struct Partition {
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 9e440bb..99f7c25 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -2455,8 +2455,8 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) {
swap(a.__isset, b.__isset);
}
-const char* Table::ascii_fingerprint = "68640B4B66B355CF317429AF70D2C260";
-const uint8_t Table::binary_fingerprint[16] = {0x68,0x64,0x0B,0x4B,0x66,0xB3,0x55,0xCF,0x31,0x74,0x29,0xAF,0x70,0xD2,0xC2,0x60};
+const char* Table::ascii_fingerprint = "29EFB2A5970EF572039E5D94CC78AA85";
+const uint8_t Table::binary_fingerprint[16] = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85};
uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
@@ -2609,6 +2609,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 14:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->temporary);
+ this->__isset.temporary = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -2695,6 +2703,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += this->privileges.write(oprot);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.temporary) {
+ xfer += oprot->writeFieldBegin("temporary", ::apache::thrift::protocol::T_BOOL, 14);
+ xfer += oprot->writeBool(this->temporary);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -2715,6 +2728,7 @@ void swap(Table &a, Table &b) {
swap(a.viewExpandedText, b.viewExpandedText);
swap(a.tableType, b.tableType);
swap(a.privileges, b.privileges);
+ swap(a.temporary, b.temporary);
swap(a.__isset, b.__isset);
}
diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 43869c2..8dd0ef3 100644
--- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -1332,7 +1332,7 @@ class StorageDescriptor {
void swap(StorageDescriptor &a, StorageDescriptor &b);
typedef struct _Table__isset {
- _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false) {}
+ _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {}
bool tableName;
bool dbName;
bool owner;
@@ -1346,15 +1346,16 @@ typedef struct _Table__isset {
bool viewExpandedText;
bool tableType;
bool privileges;
+ bool temporary;
} _Table__isset;
class Table {
public:
- static const char* ascii_fingerprint; // = "68640B4B66B355CF317429AF70D2C260";
- static const uint8_t binary_fingerprint[16]; // = {0x68,0x64,0x0B,0x4B,0x66,0xB3,0x55,0xCF,0x31,0x74,0x29,0xAF,0x70,0xD2,0xC2,0x60};
+ static const char* ascii_fingerprint; // = "29EFB2A5970EF572039E5D94CC78AA85";
+ static const uint8_t binary_fingerprint[16]; // = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85};
- Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType() {
+ Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) {
}
virtual ~Table() throw() {}
@@ -1372,6 +1373,7 @@ class Table {
std::string viewExpandedText;
std::string tableType;
PrincipalPrivilegeSet privileges;
+ bool temporary;
_Table__isset __isset;
@@ -1428,6 +1430,11 @@ class Table {
__isset.privileges = true;
}
+ void __set_temporary(const bool val) {
+ temporary = val;
+ __isset.temporary = true;
+ }
+
bool operator == (const Table & rhs) const
{
if (!(tableName == rhs.tableName))
@@ -1458,6 +1465,10 @@ class Table {
return false;
else if (__isset.privileges && !(privileges == rhs.privileges))
return false;
+ if (__isset.temporary != rhs.__isset.temporary)
+ return false;
+ else if (__isset.temporary && !(temporary == rhs.temporary))
+ return false;
return true;
}
bool operator != (const Table &rhs) const {
diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 1e7fca3..229a819 100644
--- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -47,6 +47,7 @@
private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", org.apache.thrift.protocol.TType.STRING, (short)11);
private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12);
private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13);
+ private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14);
 private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -67,6 +68,7 @@
private String viewExpandedText; // required
private String tableType; // required
private PrincipalPrivilegeSet privileges; // optional
+ private boolean temporary; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -82,7 +84,8 @@
VIEW_ORIGINAL_TEXT((short)10, "viewOriginalText"),
VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"),
TABLE_TYPE((short)12, "tableType"),
- PRIVILEGES((short)13, "privileges");
+ PRIVILEGES((short)13, "privileges"),
+ TEMPORARY((short)14, "temporary");
 private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -123,6 +126,8 @@ public static _Fields findByThriftId(int fieldId) {
return TABLE_TYPE;
case 13: // PRIVILEGES
return PRIVILEGES;
+ case 14: // TEMPORARY
+ return TEMPORARY;
default:
return null;
}
@@ -166,8 +171,9 @@ public String getFieldName() {
private static final int __CREATETIME_ISSET_ID = 0;
private static final int __LASTACCESSTIME_ISSET_ID = 1;
private static final int __RETENTION_ISSET_ID = 2;
+ private static final int __TEMPORARY_ISSET_ID = 3;
private byte __isset_bitfield = 0;
- private _Fields optionals[] = {_Fields.PRIVILEGES};
+ private _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -200,11 +206,15 @@ public String getFieldName() {
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+ tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
}
public Table() {
+ this.temporary = false;
+
}
public Table(
@@ -293,6 +303,7 @@ public Table(Table other) {
if (other.isSetPrivileges()) {
this.privileges = new PrincipalPrivilegeSet(other.privileges);
}
+ this.temporary = other.temporary;
}
public Table deepCopy() {
@@ -317,6 +328,8 @@ public void clear() {
this.viewExpandedText = null;
this.tableType = null;
this.privileges = null;
+ this.temporary = false;
+
}
public String getTableName() {
@@ -641,6 +654,28 @@ public void setPrivilegesIsSet(boolean value) {
}
}
+ public boolean isTemporary() {
+ return this.temporary;
+ }
+
+ public void setTemporary(boolean temporary) {
+ this.temporary = temporary;
+ setTemporaryIsSet(true);
+ }
+
+ public void unsetTemporary() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TEMPORARY_ISSET_ID);
+ }
+
+ /** Returns true if field temporary is set (has been assigned a value) and false otherwise */
+ public boolean isSetTemporary() {
+ return EncodingUtils.testBit(__isset_bitfield, __TEMPORARY_ISSET_ID);
+ }
+
+ public void setTemporaryIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value);
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE_NAME:
@@ -747,6 +782,14 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case TEMPORARY:
+ if (value == null) {
+ unsetTemporary();
+ } else {
+ setTemporary((Boolean)value);
+ }
+ break;
+
}
}
@@ -791,6 +834,9 @@ public Object getFieldValue(_Fields field) {
case PRIVILEGES:
return getPrivileges();
+ case TEMPORARY:
+ return Boolean.valueOf(isTemporary());
+
}
throw new IllegalStateException();
}
@@ -828,6 +874,8 @@ public boolean isSet(_Fields field) {
return isSetTableType();
case PRIVILEGES:
return isSetPrivileges();
+ case TEMPORARY:
+ return isSetTemporary();
}
throw new IllegalStateException();
}
@@ -962,6 +1010,15 @@ public boolean equals(Table that) {
return false;
}
+ boolean this_present_temporary = true && this.isSetTemporary();
+ boolean that_present_temporary = true && that.isSetTemporary();
+ if (this_present_temporary || that_present_temporary) {
+ if (!(this_present_temporary && that_present_temporary))
+ return false;
+ if (this.temporary != that.temporary)
+ return false;
+ }
+
return true;
}
@@ -1034,6 +1091,11 @@ public int hashCode() {
if (present_privileges)
builder.append(privileges);
+ boolean present_temporary = true && (isSetTemporary());
+ builder.append(present_temporary);
+ if (present_temporary)
+ builder.append(temporary);
+
return builder.toHashCode();
}
@@ -1175,6 +1237,16 @@ public int compareTo(Table other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTemporary()).compareTo(typedOther.isSetTemporary());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTemporary()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.temporary, typedOther.temporary);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1288,6 +1360,12 @@ public String toString() {
}
first = false;
}
+ if (isSetTemporary()) {
+ if (!first) sb.append(", ");
+ sb.append("temporary:");
+ sb.append(this.temporary);
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1468,6 +1546,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 14: // TEMPORARY
+ if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+ struct.temporary = iprot.readBool();
+ struct.setTemporaryIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -1557,6 +1643,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTemporary()) {
+ oprot.writeFieldBegin(TEMPORARY_FIELD_DESC);
+ oprot.writeBool(struct.temporary);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1614,7 +1705,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw
if (struct.isSetPrivileges()) {
optionals.set(12);
}
- oprot.writeBitSet(optionals, 13);
+ if (struct.isSetTemporary()) {
+ optionals.set(13);
+ }
+ oprot.writeBitSet(optionals, 14);
if (struct.isSetTableName()) {
oprot.writeString(struct.tableName);
}
@@ -1667,12 +1761,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw
if (struct.isSetPrivileges()) {
struct.privileges.write(oprot);
}
+ if (struct.isSetTemporary()) {
+ oprot.writeBool(struct.temporary);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(13);
+ BitSet incoming = iprot.readBitSet(14);
if (incoming.get(0)) {
struct.tableName = iprot.readString();
struct.setTableNameIsSet(true);
@@ -1748,6 +1845,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws
struct.privileges.read(iprot);
struct.setPrivilegesIsSet(true);
}
+ if (incoming.get(13)) {
+ struct.temporary = iprot.readBool();
+ struct.setTemporaryIsSet(true);
+ }
}
}
diff --git metastore/src/gen/thrift/gen-php/metastore/Types.php metastore/src/gen/thrift/gen-php/metastore/Types.php
index 46f6a04..3db3ded 100644
--- metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -3162,6 +3162,7 @@ class Table {
public $viewExpandedText = null;
public $tableType = null;
public $privileges = null;
+ public $temporary = false;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -3233,6 +3234,10 @@ class Table {
'type' => TType::STRUCT,
'class' => '\metastore\PrincipalPrivilegeSet',
),
+ 14 => array(
+ 'var' => 'temporary',
+ 'type' => TType::BOOL,
+ ),
);
}
if (is_array($vals)) {
@@ -3275,6 +3280,9 @@ class Table {
if (isset($vals['privileges'])) {
$this->privileges = $vals['privileges'];
}
+ if (isset($vals['temporary'])) {
+ $this->temporary = $vals['temporary'];
+ }
}
}
@@ -3414,6 +3422,13 @@ class Table {
$xfer += $input->skip($ftype);
}
break;
+ case 14:
+ if ($ftype == TType::BOOL) {
+ $xfer += $input->readBool($this->temporary);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -3523,6 +3538,11 @@ class Table {
$xfer += $this->privileges->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->temporary !== null) {
+ $xfer += $output->writeFieldBegin('temporary', TType::BOOL, 14);
+ $xfer += $output->writeBool($this->temporary);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index c4b583b..ebfc813 100644
--- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -2153,6 +2153,7 @@ class Table:
- viewExpandedText
- tableType
- privileges
+ - temporary
"""
thrift_spec = (
@@ -2170,9 +2171,10 @@ class Table:
(11, TType.STRING, 'viewExpandedText', None, None, ), # 11
(12, TType.STRING, 'tableType', None, None, ), # 12
(13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13
+ (14, TType.BOOL, 'temporary', None, False, ), # 14
)
- def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None,):
+ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],):
self.tableName = tableName
self.dbName = dbName
self.owner = owner
@@ -2186,6 +2188,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las
self.viewExpandedText = viewExpandedText
self.tableType = tableType
self.privileges = privileges
+ self.temporary = temporary
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -2275,6 +2278,11 @@ def read(self, iprot):
self.privileges.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 14:
+ if ftype == TType.BOOL:
+ self.temporary = iprot.readBool();
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -2344,6 +2352,10 @@ def write(self, oprot):
oprot.writeFieldBegin('privileges', TType.STRUCT, 13)
self.privileges.write(oprot)
oprot.writeFieldEnd()
+ if self.temporary is not None:
+ oprot.writeFieldBegin('temporary', TType.BOOL, 14)
+ oprot.writeBool(self.temporary)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
diff --git metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 79b7a1a..ac122a4 100644
--- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -531,6 +531,7 @@ class Table
VIEWEXPANDEDTEXT = 11
TABLETYPE = 12
PRIVILEGES = 13
+ TEMPORARY = 14
FIELDS = {
TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
@@ -545,7 +546,8 @@ class Table
VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'},
VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'},
TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'},
- PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}
+ PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
+ TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}
}
def struct_fields; FIELDS; end
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 2e41867..4fcff7f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -888,7 +888,7 @@ public Table getTable(String dbName, String tableName) throws MetaException {
return getTables(dbName, ".*");
}
- private MTable getMTable(String db, String table) {
+ protected MTable getMTable(String db, String table) {
MTable mtbl = null;
boolean commited = false;
try {
@@ -979,7 +979,7 @@ private Table convertToTable(MTable mtbl) throws MetaException {
mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
}
- private MTable convertToMTable(Table tbl) throws InvalidObjectException,
+ protected MTable convertToMTable(Table tbl) throws InvalidObjectException,
MetaException {
if (tbl == null) {
return null;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/SessionObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/SessionObjectStore.java
new file mode 100644
index 0000000..4a3e623
--- /dev/null
+++ metastore/src/java/org/apache/hadoop/hive/metastore/SessionObjectStore.java
@@ -0,0 +1,76 @@
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.model.MTable;
+
+public class SessionObjectStore extends ObjectStore {
+
+  // Session-local temp tables, keyed by dbName -> (tableName -> MTable);
+  // never persisted through the underlying ObjectStore.
+  Map<String, Map<String, MTable>> tempTables;
+
+  public SessionObjectStore() {
+    tempTables = new HashMap<String, Map<String, MTable>>();
+  }
+
+  @Override
+  public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+    // Non-temporary tables go through the persistent store as before.
+    if ( !tbl.isTemporary() ) {
+      super.createTable(tbl);
+      return;
+    }
+    MTable mtbl = convertToMTable(tbl);
+    String dbName = tbl.getDbName();
+    String tblName = tbl.getTableName();
+    dbName = dbName == null ? "default" : dbName;
+
+    Map<String, MTable> tables = tempTables.get(dbName);
+    if ( tables == null ) {
+      tables = new HashMap<String, MTable>();
+      tempTables.put(dbName, tables);
+    }
+    tables.put(tblName, mtbl);
+  }
+
+  // Looks up a temp table; null db is normalized to "default".
+  private MTable getTempTable(String db, String table) {
+    db = db == null ? "default" : db;
+    Map<String, MTable> tables = tempTables.get(db);
+    if ( tables != null ) {
+      return tables.get(table);
+    }
+    return null;
+  }
+
+  // Persistent store wins; temp tables are only a fallback.
+  @Override protected MTable getMTable(String db, String table) {
+    MTable t = super.getMTable(db, table);
+    if ( t == null ) {
+      t = getTempTable(db, table);
+    }
+    return t;
+  }
+
+  @Override
+  public boolean dropTable(String dbName, String tableName) throws MetaException,
+      NoSuchObjectException, InvalidObjectException, InvalidInputException {
+
+    MTable t = getTempTable(dbName, tableName);
+    if ( t != null ) {
+      dbName = dbName == null ? "default" : dbName;
+      Map<String, MTable> tables = tempTables.get(dbName);
+      tables.remove(tableName);
+      return true;
+    }
+    return super.dropTable(dbName, tableName);
+  }
+
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/Context.java ql/src/java/org/apache/hadoop/hive/ql/Context.java
index abc4290..4daefa7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -73,13 +73,13 @@
 private final Map<String, ContentSummary> pathToCS = new ConcurrentHashMap<String, ContentSummary>();
// scratch path to use for all non-local (ie. hdfs) file system tmp folders
- private final Path nonLocalScratchPath;
+ private Path nonLocalScratchPath;
// scratch directory to use for local file system tmp folders
- private final String localScratchDir;
+ private String localScratchDir;
// the permission to scratch directory (local and hdfs)
- private final String scratchDirPermission;
+ private String scratchDirPermission;
// Keeps track of scratch directories created for different scheme/authority
 private final Map<String, Path> fsScratchDirs = new HashMap<String, Path>();
@@ -124,12 +124,9 @@ public Context(Configuration conf, String executionId) {
// local & non-local tmp location is configurable. however it is the same across
// all external file systems
- nonLocalScratchPath =
- new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR),
- executionId);
- localScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR),
- executionId).toUri().getPath();
- scratchDirPermission= HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
+ nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId);
+ localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath();
+ scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 7c175aa..315ee53 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4205,6 +4205,8 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
tbl.setSkewedColValues(crtTbl.getSkewedColValues());
}
+ tbl.getTTable().setTemporary(crtTbl.isTemporary());
+
tbl.setStoredAsSubDirectories(crtTbl.isStoredAsSubDirectories());
tbl.setInputFormatClass(crtTbl.getInputFormat());
@@ -4352,6 +4354,8 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveExce
params.putAll(crtTbl.getTblProps());
}
+ tbl.getTTable().setTemporary(crtTbl.isTemporary());
+
if (crtTbl.isExternal()) {
tbl.setProperty("EXTERNAL", "TRUE");
tbl.setTableType(TableType.EXTERNAL_TABLE);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index af3ecd0..be52b30 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -799,7 +799,7 @@ databaseComment
createTableStatement
@init { pushMsg("create table statement", state); }
@after { popMsg(state); }
- : KW_CREATE (ext=KW_EXTERNAL)? KW_TABLE ifNotExists? name=tableName
+ : KW_CREATE (ext=KW_EXTERNAL|ext=KW_TEMPORARY)? KW_TABLE ifNotExists? name=tableName
( like=KW_LIKE likeName=tableName
tableLocation?
tablePropertiesPrefixed?
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 71471f4..83b6304 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -92,6 +92,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
table.getTableName(),
false, // isExternal: set to false here, can be overwritten by the
// IMPORT stmt
+ table.isTemporary(),
table.getSd().getCols(),
table.getPartitionKeys(),
table.getSd().getBucketCols(),
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 49eb83f..0ab1086 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -9793,6 +9793,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
Map tblProps = null;
boolean ifNotExists = false;
boolean isExt = false;
+ boolean isTemporary = false;
ASTNode selectStmt = null;
final int CREATE_TABLE = 0; // regular CREATE TABLE
final int CTLT = 1; // CREATE TABLE LIKE ... (CTLT)
@@ -9829,6 +9830,9 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
case HiveParser.KW_EXTERNAL:
isExt = true;
break;
+ case HiveParser.KW_TEMPORARY:
+ isTemporary = true;
+ break;
case HiveParser.TOK_LIKETABLE:
if (child.getChildCount() > 0) {
likeTableName = getUnescapedName((ASTNode) child.getChild(0));
@@ -9952,6 +9956,15 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0];
Database database = getDatabase(dbName);
outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
+
+ if (isTemporary) {
+ // For temporary tables, place the data under the session's scratch directory
+ // so that it shares the temporary table's lifecycle (dropped with the session).
+ Path path = new Path(SessionState.getTmpTableSpace(conf), dbName + "_" + (qualified.length == 1
+ ? qualified[0] : qualified[1]));
+ location = path.toString();
+ }
+
// Handle different types of CREATE TABLE command
CreateTableDesc crtTblDesc = null;
switch (command_type) {
@@ -9959,7 +9972,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
case CREATE_TABLE: // REGULAR CREATE TABLE DDL
tblProps = addDefaultProperties(tblProps);
- crtTblDesc = new CreateTableDesc(tableName, isExt, cols, partCols,
+ crtTblDesc = new CreateTableDesc(tableName, isExt, isTemporary, cols, partCols,
bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
rowFormatParams.fieldEscape,
rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim,
@@ -9981,7 +9994,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
case CTLT: // create table like
tblProps = addDefaultProperties(tblProps);
- CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt,
+ CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, isTemporary,
storageFormat.inputFormat, storageFormat.outputFormat, location,
shared.serde, shared.serdeProps, tblProps, ifNotExists, likeTableName);
SessionState.get().setCommandType(HiveOperation.CREATETABLE);
@@ -10003,7 +10016,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb)
tblProps = addDefaultProperties(tblProps);
- crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, cols, partCols,
+ crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, isTemporary, cols, partCols,
bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
rowFormatParams.fieldEscape,
rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim,
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 2537b75..52a8096 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -77,11 +77,12 @@
List skewedColNames;
List> skewedColValues;
boolean isStoredAsSubDirectories = false;
+ boolean isTemporary = false;
public CreateTableDesc() {
}
- public CreateTableDesc(String databaseName, String tableName, boolean isExternal,
+ public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary,
List cols, List partCols,
List bucketCols, List sortCols, int numBuckets,
String fieldDelim, String fieldEscape, String collItemDelim,
@@ -92,7 +93,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal
Map tblProps,
boolean ifNotExists, List skewedColNames, List> skewedColValues) {
- this(tableName, isExternal, cols, partCols,
+ this(tableName, isExternal, isTemporary, cols, partCols,
bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape,
collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat,
outputFormat, location, serName, storageHandler, serdeProps,
@@ -101,7 +102,7 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal
this.databaseName = databaseName;
}
- public CreateTableDesc(String tableName, boolean isExternal,
+ public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary,
List cols, List partCols,
List bucketCols, List sortCols, int numBuckets,
String fieldDelim, String fieldEscape, String collItemDelim,
@@ -113,6 +114,7 @@ public CreateTableDesc(String tableName, boolean isExternal,
boolean ifNotExists, List skewedColNames, List> skewedColValues) {
this.tableName = tableName;
this.isExternal = isExternal;
+ this.isTemporary = isTemporary;
this.bucketCols = new ArrayList(bucketCols);
this.sortCols = new ArrayList(sortCols);
this.collItemDelim = collItemDelim;
@@ -535,4 +537,19 @@ public void setNullFormat(String nullFormat) {
this.nullFormat = nullFormat;
}
+ /**
+ * @return true if the table is temporary, false otherwise
+ */
+ @Explain(displayName = "isTemporary", displayOnlyOnTrue = true)
+ public boolean isTemporary() {
+ return isTemporary;
+ }
+
+ /**
+ * @param isTemporary whether the table should be created as temporary
+ */
+ public void setTemporary(boolean isTemporary) {
+ this.isTemporary = isTemporary;
+ }
+
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
index cb5d64c..c9ce30f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java
@@ -38,16 +38,18 @@
Map tblProps;
boolean ifNotExists;
String likeTableName;
+ boolean isTemporary = false;
public CreateTableLikeDesc() {
}
- public CreateTableLikeDesc(String tableName, boolean isExternal,
+ public CreateTableLikeDesc(String tableName, boolean isExternal, boolean isTemporary,
String defaultInputFormat, String defaultOutputFormat, String location,
String defaultSerName, Map defaultSerdeProps, Map tblProps,
boolean ifNotExists, String likeTableName) {
this.tableName = tableName;
this.isExternal = isExternal;
+ this.isTemporary = isTemporary;
this.defaultInputFormat=defaultInputFormat;
this.defaultOutputFormat=defaultOutputFormat;
this.defaultSerName=defaultSerName;
@@ -168,4 +170,20 @@ public void setLikeTableName(String likeTableName) {
public void setTblProps(Map tblProps) {
this.tblProps = tblProps;
}
+
+ /**
+ * @return true if the table is temporary, false otherwise
+ */
+ @Explain(displayName = "isTemporary", displayOnlyOnTrue = true)
+ public boolean isTemporary() {
+ return isTemporary;
+ }
+
+ /**
+ * @param isTemporary whether the table should be created as temporary
+ */
+ public void setTemporary(boolean isTemporary) {
+ this.isTemporary = isTemporary;
+ }
+
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index dab8610..73b2543 100644
--- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -75,6 +76,11 @@
public class SessionState {
private static final Log LOG = LogFactory.getLog(SessionState.class);
+ public static final String TMP_PREFIX = "_tmp_space.db";
+ public static final String LOCAL_SESSION_PATH_KEY = "_hive.local.session.path";
+ public static final String HDFS_SESSION_PATH_KEY = "_hive.hdfs.session.path";
+ public static final String TMP_TABLE_SPACE_KEY = "_hive.tmp_table_space";
+
protected ClassLoader parentLoader;
/**
@@ -174,6 +180,15 @@
private final String userName;
+ // Scratch path to use for all non-local (i.e. HDFS) file system tmp folders
+ private Path hdfsSessionPath;
+
+ // Subdirectory of the HDFS session path, used to hold temporary tables
+ private Path hdfsTmpTableSpace;
+
+ // scratch directory to use for local file system tmp folders
+ private Path localSessionPath;
+
/**
* Get the lineage state stored in this session.
*
@@ -341,6 +356,7 @@ public static SessionState start(SessionState startSs) {
Hive.get(new HiveConf(startSs.conf)).getMSC();
ShimLoader.getHadoopShims().getUGIForConf(startSs.conf);
FileSystem.get(startSs.conf);
+ startSs.createSessionPaths(startSs.conf);
} catch (Exception e) {
// catch-all due to some exec time dependencies on session state
// that would cause ClassNoFoundException otherwise
@@ -363,6 +379,76 @@ public static SessionState start(SessionState startSs) {
return startSs;
}
+ public static Path getLocalSessionPath(Configuration conf) {
+ SessionState ss = SessionState.get();
+ if (ss != null) {
+ return ss.localSessionPath;
+ } else {
+ return new Path(conf.get(LOCAL_SESSION_PATH_KEY));
+ }
+ }
+
+ public static Path getHDFSSessionPath(Configuration conf) {
+ SessionState ss = SessionState.get();
+ if (ss != null) {
+ return ss.hdfsSessionPath;
+ } else {
+ return new Path(conf.get(HDFS_SESSION_PATH_KEY));
+ }
+ }
+
+ public static Path getTmpTableSpace(Configuration conf) {
+ SessionState ss = SessionState.get();
+ if (ss != null) {
+ return ss.hdfsTmpTableSpace;
+ } else {
+ return new Path(conf.get(TMP_TABLE_SPACE_KEY));
+ }
+ }
+
+ private void dropSessionPaths(Configuration conf) throws IOException {
+ hdfsSessionPath.getFileSystem(conf).delete(hdfsSessionPath, true);
+ localSessionPath.getFileSystem(conf).delete(localSessionPath, true);
+ }
+
+ private void createSessionPaths(Configuration conf) throws IOException {
+
+ String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
+ String sessionId = getSessionId();
+
+ // Local and non-local tmp locations are configurable; however, the non-local
+ // location is the same across all external file systems.
+ hdfsSessionPath =
+ new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR),
+ sessionId);
+ createPath(conf, hdfsSessionPath, scratchDirPermission);
+ conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
+
+ localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR),
+ sessionId);
+ createPath(conf, localSessionPath, scratchDirPermission);
+ conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
+
+ hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
+ createPath(conf, hdfsTmpTableSpace, scratchDirPermission);
+ conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
+ }
+
+ private void createPath(Configuration conf, Path p, String perm) throws IOException {
+ FileSystem fs = p.getFileSystem(conf);
+ p = new Path(fs.makeQualified(p).toString());
+ FsPermission fsPermission = new FsPermission(Short.parseShort(perm.trim(), 8));
+
+ if (!Utilities.createDirsWithPermission(conf, p, fsPermission)) {
+ throw new IOException("Cannot make directory: "
+ + p.toString());
+ }
+
+ // best effort to clean up if we don't shut down properly
+ fs.deleteOnExit(p);
+ }
+
+
/**
* Setup authentication and authorization plugins for this session.
* @param startSs
@@ -963,6 +1049,8 @@ public void close() throws IOException {
} finally {
tezSessionState = null;
}
+
+ dropSessionPaths(conf);
}
public AuthorizationMode getAuthorizationMode(){
diff --git ql/src/test/queries/clientpositive/temp_table.q ql/src/test/queries/clientpositive/temp_table.q
new file mode 100644
index 0000000..c6cdfbc
--- /dev/null
+++ ql/src/test/queries/clientpositive/temp_table.q
@@ -0,0 +1,40 @@
+EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0;
+CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0;
+
+EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1;
+CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1;
+
+DESCRIBE foo;
+DESCRIBE bar;
+
+explain select * from foo limit 10;
+select * from foo limit 10;
+
+explain select * from (select * from foo union all select * from bar) u order by key limit 10;
+select * from (select * from foo union all select * from bar) u order by key limit 10;
+
+CREATE TEMPORARY TABLE baz LIKE foo;
+
+INSERT OVERWRITE TABLE baz SELECT * from foo;
+
+CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key;
+
+select * from bay limit 10;
+
+SHOW TABLES;
+
+CREATE DATABASE two;
+
+USE two;
+
+SHOW TABLES;
+
+CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo;
+
+SHOW TABLES;
+
+use default;
+
+DROP DATABASE two CASCADE;
+
+DROP TABLE bay;
diff --git ql/src/test/results/clientpositive/temp_table.q.out ql/src/test/results/clientpositive/temp_table.q.out
new file mode 100644
index 0000000..eabdce2
--- /dev/null
+++ ql/src/test/results/clientpositive/temp_table.q.out
@@ -0,0 +1,479 @@
+PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-8 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-8
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key % 2) = 0) (type: boolean)
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.foo
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-8
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+#### A masked pattern was here ####
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ name: foo
+ isTemporary: true
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.foo
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.foo
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@foo
+PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+ Stage-4
+ Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+ Stage-8 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-8
+ Stage-3
+ Stage-5
+ Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key % 2) = 1) (type: boolean)
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bar
+
+ Stage: Stage-7
+ Conditional Operator
+
+ Stage: Stage-4
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-8
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+#### A masked pattern was here ####
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ name: bar
+ isTemporary: true
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+ Stage: Stage-3
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bar
+
+ Stage: Stage-5
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bar
+
+ Stage: Stage-6
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bar
+PREHOOK: query: DESCRIBE foo
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@foo
+POSTHOOK: query: DESCRIBE foo
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@foo
+key string
+value string
+PREHOOK: query: DESCRIBE bar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@bar
+POSTHOOK: query: DESCRIBE bar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@bar
+key string
+value string
+PREHOOK: query: explain select * from foo limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from foo limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 10
+ Processor Tree:
+ TableScan
+ alias: foo
+ Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ ListSink
+
+PREHOOK: query: select * from foo limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@foo
+#### A masked pattern was here ####
+POSTHOOK: query: select * from foo limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@foo
+#### A masked pattern was here ####
+238 val_238
+86 val_86
+278 val_278
+98 val_98
+484 val_484
+150 val_150
+224 val_224
+66 val_66
+128 val_128
+146 val_146
+PREHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: bar
+ Statistics: Num rows: 253 Data size: 2703 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 253 Data size: 2703 Basic stats: COMPLETE Column stats: NONE
+ Union
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ TableScan
+ alias: foo
+ Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+ Union
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Reduce Operator Tree:
+ Extract
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 10
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bar
+PREHOOK: Input: default@foo
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bar
+POSTHOOK: Input: default@foo
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@baz
+PREHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo
+PREHOOK: type: QUERY
+PREHOOK: Input: default@foo
+PREHOOK: Output: default@baz
+POSTHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@foo
+POSTHOOK: Output: default@baz
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bay
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from bay limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bay
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bay limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bay
+#### A masked pattern was here ####
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+alltypesorc
+bar
+bay
+baz
+foo
+src
+src1
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: CREATE DATABASE two
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: CREATE DATABASE two
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: USE two
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE two
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@foo
+POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@foo
+POSTHOOK: Output: two@foo
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+foo
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: DROP DATABASE two CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:two
+PREHOOK: Output: database:two
+PREHOOK: Output: two@foo
+POSTHOOK: query: DROP DATABASE two CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:two
+POSTHOOK: Output: database:two
+POSTHOOK: Output: two@foo
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: DROP TABLE bay
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@bay
+PREHOOK: Output: default@bay
+POSTHOOK: query: DROP TABLE bay
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@bay
+POSTHOOK: Output: default@bay
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
diff --git ql/src/test/results/clientpositive/tez/temp_table.q.out ql/src/test/results/clientpositive/tez/temp_table.q.out
new file mode 100644
index 0000000..ea7e7ce
--- /dev/null
+++ ql/src/test/results/clientpositive/tez/temp_table.q.out
@@ -0,0 +1,405 @@
+PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key % 2) = 0) (type: boolean)
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.foo
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+#### A masked pattern was here ####
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ name: foo
+ isTemporary: true
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@foo
+PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key % 2) = 1) (type: boolean)
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bar
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+#### A masked pattern was here ####
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ name: bar
+ isTemporary: true
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bar
+PREHOOK: query: DESCRIBE foo
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@foo
+POSTHOOK: query: DESCRIBE foo
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@foo
+key string
+value string
+PREHOOK: query: DESCRIBE bar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@bar
+POSTHOOK: query: DESCRIBE bar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@bar
+key string
+value string
+PREHOOK: query: explain select * from foo limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from foo limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 10
+ Processor Tree:
+ TableScan
+ alias: foo
+ Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 247 Data size: 2609 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ ListSink
+
+PREHOOK: query: select * from foo limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@foo
+#### A masked pattern was here ####
+POSTHOOK: query: select * from foo limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@foo
+#### A masked pattern was here ####
+238 val_238
+86 val_86
+278 val_278
+98 val_98
+484 val_484
+150 val_150
+224 val_224
+66 val_66
+128 val_128
+146 val_146
+PREHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from (select * from foo union all select * from bar) u order by key limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Union 2 (CONTAINS)
+ Map 4 <- Union 2 (CONTAINS)
+ Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: bar
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: foo
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Extract
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Union 2
+ Vertex: Union 2
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 10
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bar
+PREHOOK: Input: default@foo
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from foo union all select * from bar) u order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bar
+POSTHOOK: Input: default@foo
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY TABLE baz LIKE foo
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@baz
+PREHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo
+PREHOOK: type: QUERY
+PREHOOK: Input: default@foo
+PREHOOK: Output: default@baz
+POSTHOOK: query: INSERT OVERWRITE TABLE baz SELECT * from foo
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@foo
+POSTHOOK: Output: default@baz
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+POSTHOOK: query: CREATE TEMPORARY TABLE bay STORED AS orc AS SELECT * FROM src ORDER BY key
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bay
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from bay limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bay
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bay limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bay
+#### A masked pattern was here ####
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+alltypesorc
+bar
+bay
+baz
+foo
+src
+src1
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: CREATE DATABASE two
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: CREATE DATABASE two
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: USE two
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: USE two
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@foo
+POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@foo
+POSTHOOK: Output: two@foo
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+foo
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: DROP DATABASE two CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:two
+PREHOOK: Output: database:two
+PREHOOK: Output: two@foo
+POSTHOOK: query: DROP DATABASE two CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:two
+POSTHOOK: Output: database:two
+POSTHOOK: Output: two@foo
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: DROP TABLE bay
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@bay
+PREHOOK: Output: default@bay
+POSTHOOK: query: DROP TABLE bay
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@bay
+POSTHOOK: Output: default@bay
+POSTHOOK: Lineage: baz.key SIMPLE [(foo)foo.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: baz.value SIMPLE [(foo)foo.FieldSchema(name:value, type:string, comment:null), ]