Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1350406)
+++ conf/hive-default.xml.template (working copy)
@@ -304,6 +304,12 @@
+  <name>hive.metastore.storequery</name>
+  <value>false</value>
+  <description>Whether to store the query which created the table or partition</description>
+</property>
+
+<property>
  <name>hive.default.fileformat</name>
  <value>TextFile</value>
  <description>Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
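For illustration only (not part of the patch): a minimal Java sketch of how a caller could gate the recorded command on this flag, using the ConfVars.HIVE_STORE_CREATION_QUERY entry added to HiveConf.java below. The class and method names here are made up for the example.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class CreationQuerySketch {
  // Returns the command text to record, or null when hive.metastore.storequery
  // is left at its default of false.
  public static String creationQueryOrNull(HiveConf conf, String cmd) {
    return HiveConf.getBoolVar(conf, ConfVars.HIVE_STORE_CREATION_QUERY) ? cmd : null;
  }
}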
Index: metastore/scripts/upgrade/mysql/010-HIVE-3071.mysql.sql
===================================================================
--- metastore/scripts/upgrade/mysql/010-HIVE-3071.mysql.sql (revision 0)
+++ metastore/scripts/upgrade/mysql/010-HIVE-3071.mysql.sql (revision 0)
@@ -0,0 +1,2 @@
+SELECT '< HIVE-3071: Add the command which created the table/partition >' AS ' ';
+ALTER TABLE `SDS` ADD COLUMN `CREATION_QUERY` MEDIUMTEXT;
Index: metastore/scripts/upgrade/mysql/upgrade-0.9.0-to-0.10.0.mysql.sql
===================================================================
--- metastore/scripts/upgrade/mysql/upgrade-0.9.0-to-0.10.0.mysql.sql (revision 0)
+++ metastore/scripts/upgrade/mysql/upgrade-0.9.0-to-0.10.0.mysql.sql (revision 0)
@@ -0,0 +1,3 @@
+SELECT 'Upgrading MetaStore schema from 0.9.0 to 0.10.0' AS ' ';
+SOURCE 010-HIVE-3071.mysql.sql
+SELECT 'Finished upgrading MetaStore schema from 0.9.0 to 0.10.0' AS ' ';
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 1350406)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -125,6 +125,7 @@
List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
+ List<String> vals5 = makeVals("2008-07-04 14:13:12", "16");
client.dropTable(dbName, tblName);
silentDropDatabase(dbName);
@@ -223,6 +224,30 @@
assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
.getPermission());
+ Partition retp5 = client.getPartitionTemplate(dbName, tblName, vals5);
+ assertEquals("New partition does not have values set", vals5, retp5.getValues());
+
+ String invalidDbName = "invalid", invalidTblName = "invalid";
+
+ exceptionThrown = false;
+ try {
+ Partition retp6 = client.getPartitionTemplate(invalidDbName, tblName, vals5);
+ } catch (InvalidObjectException e1) {
+ exceptionThrown = true;
+ }
+ assertTrue("getPartitionTemplate() should have thrown InvalidObjectException"
+ + " due to invalid dbName", exceptionThrown);
+
+ exceptionThrown = false;
+ try {
+ Partition retp7 = client.getPartitionTemplate(dbName, invalidTblName, vals5);
+ } catch (InvalidObjectException e1) {
+ exceptionThrown = true;
+ }
+
+ assertTrue("getPartitionTemplate() should have thrown InvalidObjectException"
+ + " due to invalid table name", exceptionThrown);
+
Partition part_get = client.getPartition(dbName, tblName, part.getValues());
if(isThriftClient) {
// since we are using thrift, 'part' will not have the create time and
Index: metastore/src/model/package.jdo
===================================================================
--- metastore/src/model/package.jdo (revision 1350406)
+++ metastore/src/model/package.jdo (working copy)
@@ -291,6 +291,9 @@
+      <field name="creationQuery">
+        <column name="CREATION_QUERY" jdbc-type="LONGVARCHAR"/>
+      </field>
Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
===================================================================
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java (revision 1350406)
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java (working copy)
@@ -32,6 +32,7 @@
private List<String> bucketCols;
private List<MOrder> sortCols;
private Map<String, String> parameters;
+ private String creationQuery;
public MStorageDescriptor() {}
@@ -47,10 +48,12 @@
* @param bucketCols
* @param sortOrder
* @param parameters
+ * @param creationQuery
*/
public MStorageDescriptor(MColumnDescriptor cd, String location, String inputFormat,
String outputFormat, boolean isCompressed, int numBuckets, MSerDeInfo serDeInfo,
- List<String> bucketCols, List<MOrder> sortOrder, Map<String, String> parameters) {
+ List<String> bucketCols, List<MOrder> sortOrder, Map<String, String> parameters,
+ String creationQuery) {
this.cd = cd;
this.location = location;
this.inputFormat = inputFormat;
@@ -61,6 +64,7 @@
this.bucketCols = bucketCols;
this.sortCols = sortOrder;
this.parameters = parameters;
+ this.creationQuery = creationQuery;
}
@@ -205,4 +209,20 @@
public List<MOrder> getSortCols() {
return sortCols;
}
+
+
+ /**
+ * @return the query which created the table
+ */
+ public String getCreationQuery() {
+ return creationQuery;
+ }
+
+
+ /**
+ * @param creationQuery the creationQuery to set
+ */
+ public void setCreationQuery(String creationQuery) {
+ this.creationQuery = creationQuery;
+ }
}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 1350406)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy)
@@ -290,6 +290,21 @@
throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
/**
+ * Get a new Partition object without adding it to the metastore. Similar to
+ * appendPartition, but the partition is not persisted; the caller can modify
+ * the returned object (for example, set the creation query) and add it separately.
+ *
+ * @param tableName
+ * @param dbName
+ * @param partVals
+ * @return the new, unpersisted Partition object
+ * @throws InvalidObjectException
+ * @throws MetaException
+ * @throws TException
+ */
+ public Partition getPartitionTemplate(String tableName, String dbName,
+ List<String> partVals) throws InvalidObjectException, MetaException, TException;
+
+ /**
* Add a partition to the table.
*
* @param partition
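As a usage sketch (illustrative only, not part of the patch; it assumes a connected HiveMetaStoreClient and mirrors the Hive.getPartition() change later in this patch), the intended call pattern is to build the template, attach the creation query, and only then persist the partition:

import java.util.List;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.ql.session.SessionState;

public class PartitionTemplateSketch {
  // Build the partition template, record the current session command on its
  // storage descriptor, and only then persist it in the metastore.
  public static Partition addWithCreationQuery(HiveMetaStoreClient client,
      String dbName, String tblName, List<String> partVals) throws Exception {
    Partition tpart = client.getPartitionTemplate(dbName, tblName, partVals);
    tpart.getSd().setCreationQuery(SessionState.get().getCmd());
    return client.add_partition(tpart);
  }
}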
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 1350406)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy)
@@ -375,6 +375,12 @@
client.append_partition_by_name(dbName, tableName, partName));
}
+ public Partition getPartitionTemplate(String db_name, String table_name,
+ List<String> part_vals) throws InvalidObjectException,
+ MetaException, TException {
+ return deepCopy(client.get_partition_template(db_name, table_name, part_vals));
+ }
+
/**
* Create a new Database
* @param db
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1350406)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -1089,42 +1089,14 @@
List<String> part_vals) throws InvalidObjectException,
AlreadyExistsException, MetaException {
- Partition part = new Partition();
+ Partition part = null;
boolean success = false, madeDir = false;
Path partLocation = null;
try {
ms.openTransaction();
- part.setDbName(dbName);
- part.setTableName(tableName);
- part.setValues(part_vals);
+ part = get_partition_template_core(dbName, tableName, part_vals, true);
+ partLocation = new Path(part.getSd().getLocation());
- Table tbl = ms.getTable(part.getDbName(), part.getTableName());
- if (tbl == null) {
- throw new InvalidObjectException(
- "Unable to add partition because table or database do not exist");
- }
- if (tbl.getSd().getLocation() == null) {
- throw new MetaException(
- "Cannot append a partition to a view");
- }
-
- part.setSd(tbl.getSd());
- partLocation = new Path(tbl.getSd().getLocation(), Warehouse
- .makePartName(tbl.getPartitionKeys(), part_vals));
- part.getSd().setLocation(partLocation.toString());
-
- Partition old_part = null;
- try {
- old_part = ms.getPartition(part.getDbName(), part
- .getTableName(), part.getValues());
- } catch (NoSuchObjectException e) {
- // this means there is no existing partition
- old_part = null;
- }
- if (old_part != null) {
- throw new AlreadyExistsException("Partition already exists:" + part);
- }
-
if (!wh.isDir(partLocation)) {
if (!wh.mkdirs(partLocation)) {
throw new MetaException(partLocation
@@ -1132,12 +1104,6 @@
}
madeDir = true;
}
-
- // set create time
- long time = System.currentTimeMillis() / 1000;
- part.setCreateTime((int) time);
- part.putToParameters(Constants.DDL_TIME, Long.toString(time));
-
success = ms.addPartition(part);
if (success) {
success = ms.commitTransaction();
@@ -1145,7 +1111,7 @@
} finally {
if (!success) {
ms.rollbackTransaction();
- if (madeDir) {
+ if(madeDir) {
wh.deleteDir(partLocation, true);
}
}
@@ -1153,6 +1119,75 @@
return part;
}
+ @Override
+ public Partition get_partition_template(String dbName, String tableName,
+ List<String> part_vals) throws InvalidObjectException, MetaException {
+
+ startPartitionFunction("get_partition_template", dbName, tableName, part_vals);
+ Partition part = null;
+
+ try {
+ part = get_partition_template_core(dbName, tableName, part_vals, false);
+ } catch (AlreadyExistsException e) {
+ // ignore it. this will not happen
+ } finally {
+ endFunction("get_partition_template", part != null);
+ }
+ return part;
+ }
+
+ private Partition get_partition_template_core(String dbName, String tableName,
+ List<String> part_vals, boolean failIfExists) throws InvalidObjectException,
+ AlreadyExistsException, MetaException {
+
+ Partition part = new Partition();
+ Path partLocation = null;
+ RawStore ms = getMS();
+
+ part.setDbName(dbName);
+ part.setTableName(tableName);
+ part.setValues(part_vals);
+
+ Table tbl = ms.getTable(part.getDbName(), part.getTableName());
+ if (tbl == null) {
+ throw new InvalidObjectException(
+ "Unable to get partition because table or database do not exist");
+ }
+ String tableType = tbl.getTableType();
+ if (!(tableType.equals(TableType.MANAGED_TABLE.toString()) ||
+ tableType.equals(TableType.EXTERNAL_TABLE.toString()))) {
+ throw new MetaException(
+ "Cannot build a partition for table " + tableName + " of type " + tableType
+ + "; only managed and external tables have a location (a view does not).");
+ }
+
+ part.setSd(tbl.getSd());
+ partLocation = new Path(tbl.getSd().getLocation(), Warehouse
+ .makePartName(tbl.getPartitionKeys(), part_vals));
+ part.getSd().setLocation(partLocation.toString());
+
+ if(failIfExists) {
+ Partition old_part = null;
+ try {
+ old_part = ms.getPartition(part.getDbName(), part
+ .getTableName(), part.getValues());
+ } catch (NoSuchObjectException e) {
+ // this means there is no existing partition
+ old_part = null;
+ }
+ if (old_part != null) {
+ throw new AlreadyExistsException("Partition already exists:" + part);
+ }
+ }
+
+
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ part.setCreateTime((int) time);
+ part.putToParameters(Constants.DDL_TIME, Long.toString(time));
+
+ return part;
+ }
+
public Partition append_partition(final String dbName, final String tableName,
final List part_vals) throws InvalidObjectException,
AlreadyExistsException, MetaException {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1350406)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -998,7 +998,7 @@
msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
.isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd
.getSerDeInfo()), msd.getBucketCols(), convertToOrders(msd
- .getSortCols()), msd.getParameters());
+ .getSortCols()), msd.getParameters(), msd.getCreationQuery());
}
private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd)
@@ -1036,11 +1036,14 @@
if (sd == null) {
return null;
}
+ boolean storeQuery = HiveConf.getBoolVar(getConf(), ConfVars.HIVE_STORE_CREATION_QUERY);
+ String creationQuery = storeQuery ? "set as " + sd.getCreationQuery() : "not set";
return new MStorageDescriptor(mcd, sd
.getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd
.isCompressed(), sd.getNumBuckets(), converToMSerDeInfo(sd
.getSerdeInfo()), sd.getBucketCols(),
- convertToMOrders(sd.getSortCols()), sd.getParameters());
+ convertToMOrders(sd.getSortCols()), sd.getParameters(),
+ creationQuery);
}
public boolean addPartition(Partition part) throws InvalidObjectException,
Index: metastore/if/hive_metastore.thrift
===================================================================
--- metastore/if/hive_metastore.thrift (revision 1350406)
+++ metastore/if/hive_metastore.thrift (working copy)
@@ -142,7 +142,8 @@
7: SerDeInfo serdeInfo, // serialization and deserialization information
8: list<string> bucketCols, // reducer grouping columns and clustering columns and bucketing columns`
9: list<Order> sortCols, // sort order of the data in each bucket
- 10: map<string,string> parameters // any user supplied key value hash
+ 10: map<string,string> parameters, // any user supplied key value hash
+ 11: string creationQuery, // the query which created the table/partition
}
// table information
@@ -359,6 +360,17 @@
throws(1:NoSuchObjectException o1, 2:MetaException o2)
bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
+ // Gets a new Partition object built from the given values, similar to append_partition,
+ // but without adding the partition to the metastore. The table is looked up by database
+ // and table name and must be a managed or external table (not a view); the partition
+ // itself is never fetched, so it does not matter whether it already exists. The values
+ // are only used to construct and return the new partition object.
+ Partition get_partition_template(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+ throws (1:InvalidObjectException o1, 2:MetaException o2)
Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
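One property worth calling out, shown as an illustrative Java sketch rather than part of the patch: because nothing is written to the metastore, the call can be repeated for the same values without an AlreadyExistsException, unlike append_partition.

import java.util.List;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class TemplateIdempotenceSketch {
  // Both calls succeed; the templates are built the same way, so they carry
  // the same derived partition location, and neither call persists anything.
  public static boolean sameLocation(HiveMetaStoreClient client, String dbName,
      String tblName, List<String> partVals) throws Exception {
    Partition first = client.getPartitionTemplate(dbName, tblName, partVals);
    Partition second = client.getPartitionTemplate(dbName, tblName, partVals);
    return first.getSd().getLocation().equals(second.getSd().getLocation());
  }
}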
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1350406)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -124,6 +124,7 @@
HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX,
HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
+ HiveConf.ConfVars.HIVE_STORE_CREATION_QUERY,
};
/**
@@ -458,6 +459,9 @@
HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE("hive.hadoop.supports.splittable.combineinputformat", false),
+ // whether to store creation query or not
+ HIVE_STORE_CREATION_QUERY("hive.metastore.storequery", false),
+
// Optimizer
HIVEOPTCP("hive.optimize.cp", true), // column pruner
HIVEOPTINDEXFILTER("hive.optimize.index.filter", false), // automatically use indexes
Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (revision 1350406)
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (working copy)
@@ -48,7 +48,6 @@
import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
import org.apache.hadoop.hive.ql.plan.ScriptDesc;
import org.apache.hadoop.hive.ql.plan.SelectDesc;
-import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.mapred.TextInputFormat;
@@ -117,7 +116,7 @@
db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
db.createTable(src, cols, null, TextInputFormat.class,
IgnoreKeyTextOutputFormat.class);
- db.loadTable(hadoopDataFile[i], src, false, false);
+ db.loadTable(hadoopDataFile[i], src, false, false, null);
i++;
}
Index: ql/src/test/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/history/TestHiveHistory.java (revision 1350406)
+++ ql/src/test/org/apache/hadoop/hive/ql/history/TestHiveHistory.java (working copy)
@@ -106,7 +106,7 @@
db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
db.createTable(src, cols, null, TextInputFormat.class,
IgnoreKeyTextOutputFormat.class);
- db.loadTable(hadoopDataFile[i], src, false, false);
+ db.loadTable(hadoopDataFile[i], src, false, false, null);
i++;
}
Index: ql/src/test/queries/clientpositive/alter5.q
===================================================================
--- ql/src/test/queries/clientpositive/alter5.q (revision 1350406)
+++ ql/src/test/queries/clientpositive/alter5.q (working copy)
@@ -13,12 +13,12 @@
--
alter table alter5 add partition (dt='a') location 'parta';
-describe extended alter5 partition (dt='a');
+describe formatted alter5 partition (dt='a');
insert overwrite table alter5 partition (dt='a') select col1 from alter5_src ;
select * from alter5 where dt='a';
-describe extended alter5 partition (dt='a');
+describe formatted alter5 partition (dt='a');
-- Cleanup
DROP TABLE alter5_src;
@@ -37,9 +37,9 @@
create table alter5 ( col1 string ) partitioned by (dt string);
alter table alter5 add partition (dt='a') location 'parta';
-describe extended alter5 partition (dt='a');
+describe formatted alter5 partition (dt='a');
insert overwrite table alter5 partition (dt='a') select col1 from alter5_src ;
select * from alter5 where dt='a';
-describe extended alter5 partition (dt='a');
+describe formatted alter5 partition (dt='a');
Index: ql/src/test/queries/clientpositive/alter_index.q
===================================================================
--- ql/src/test/queries/clientpositive/alter_index.q (revision 1350406)
+++ ql/src/test/queries/clientpositive/alter_index.q (working copy)
@@ -1,10 +1,10 @@
drop index src_index_8 on src;
create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-desc extended default__src_src_index_8__;
+desc formatted default__src_src_index_8__;
alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3");
-desc extended default__src_src_index_8__;
+desc formatted default__src_src_index_8__;
drop index src_index_8 on src;
Index: ql/src/test/queries/clientpositive/updateAccessTime.q
===================================================================
--- ql/src/test/queries/clientpositive/updateAccessTime.q (revision 1350406)
+++ ql/src/test/queries/clientpositive/updateAccessTime.q (working copy)
@@ -3,9 +3,9 @@
set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter,org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables,org.apache.hadoop.hive.ql.hooks.UpdateInputAccessTimeHook$PreExec;
create table tstsrc as select * from src;
-desc extended tstsrc;
+desc formatted tstsrc;
select count(1) from tstsrc;
-desc extended tstsrc;
+desc formatted tstsrc;
drop table tstsrc;
drop table tstsrcpart;
@@ -17,14 +17,14 @@
insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr from srcpart;
-desc extended tstsrcpart;
-desc extended tstsrcpart partition (ds='2008-04-08', hr='11');
-desc extended tstsrcpart partition (ds='2008-04-08', hr='12');
+desc formatted tstsrcpart;
+desc formatted tstsrcpart partition (ds='2008-04-08', hr='11');
+desc formatted tstsrcpart partition (ds='2008-04-08', hr='12');
select count(1) from tstsrcpart where ds = '2008-04-08' and hr = '11';
-desc extended tstsrcpart;
-desc extended tstsrcpart partition (ds='2008-04-08', hr='11');
-desc extended tstsrcpart partition (ds='2008-04-08', hr='12');
+desc formatted tstsrcpart;
+desc formatted tstsrcpart partition (ds='2008-04-08', hr='11');
+desc formatted tstsrcpart partition (ds='2008-04-08', hr='12');
drop table tstsrcpart;
Index: ql/src/test/queries/clientpositive/stats14.q
===================================================================
--- ql/src/test/queries/clientpositive/stats14.q (revision 1350406)
+++ ql/src/test/queries/clientpositive/stats14.q (working copy)
@@ -1,18 +1,26 @@
set datanucleus.cache.collections=false;
+set hive.metastore.storequery=true;
create table stats_src like src;
+desc formatted stats_src;
insert overwrite table stats_src select * from src;
+desc formatted stats_src;
analyze table stats_src compute statistics;
desc formatted stats_src;
create table stats_part like srcpart;
+desc formatted stats_part;
insert overwrite table stats_part partition (ds='2010-04-08', hr = '11') select key, value from src;
+desc formatted stats_part;
insert overwrite table stats_part partition (ds='2010-04-08', hr = '12') select key, value from src;
analyze table stats_part partition(ds='2010-04-08', hr='11') compute statistics;
analyze table stats_part partition(ds='2010-04-08', hr='12') compute statistics;
+desc formatted stats_part;
+desc formatted stats_part partition (ds='2010-04-08', hr = '11');
+desc formatted stats_part partition (ds='2010-04-08', hr = '12');
insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select key, value from src;
desc formatted stats_part;
@@ -23,4 +31,4 @@
desc formatted stats_part;
drop table stats_src;
-drop table stats_part;
\ No newline at end of file
+drop table stats_part;
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java (revision 1350406)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java (working copy)
@@ -190,6 +190,7 @@
formatOutput("Num Buckets:", String.valueOf(storageDesc.getNumBuckets()), tableInfo);
formatOutput("Bucket Columns:", storageDesc.getBucketCols().toString(), tableInfo);
formatOutput("Sort Columns:", storageDesc.getSortCols().toString(), tableInfo);
+ formatOutput("Creation Query:", storageDesc.getCreationQuery(), tableInfo);
if (storageDesc.getSerdeInfo().getParametersSize() > 0) {
tableInfo.append("Storage Desc Params:").append(LINE_DELIM);
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1350406)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy)
@@ -31,13 +31,13 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
-import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -63,7 +63,6 @@
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
@@ -586,7 +585,7 @@
String storageHandler, String location,
Map<String, String> idxProps, Map<String, String> tblProps, Map<String, String> serdeProps,
String collItemDelim, String fieldDelim, String fieldEscape,
- String lineDelim, String mapKeyDelim, String indexComment)
+ String lineDelim, String mapKeyDelim, String indexComment, String creationQuery)
throws HiveException {
try {
@@ -687,6 +686,8 @@
storageDescriptor.setCols(indexTblCols);
storageDescriptor.setSortCols(sortCols);
+ storageDescriptor.setCreationQuery(creationQuery);
+
int time = (int) (System.currentTimeMillis() / 1000);
org.apache.hadoop.hive.metastore.api.Table tt = null;
HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass);
@@ -1100,10 +1101,11 @@
* @param holdDDLTime if true, force [re]create the partition
* @param inheritTableSpecs if true, on [re]creating the partition, take the
* location/inputformat/outputformat/serde details from table spec
+ * @param creationQuery the query which loaded the partition
*/
public void loadPartition(Path loadPath, String tableName,
Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
- boolean inheritTableSpecs)
+ boolean inheritTableSpecs, String creationQuery)
throws HiveException {
Table tbl = getTable(tableName);
try {
@@ -1155,7 +1157,7 @@
// recreate the partition if it existed before
if (!holdDDLTime) {
- getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs);
+ getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs, creationQuery);
}
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
@@ -1177,12 +1179,13 @@
* @param replace
* @param numDP number of dynamic partitions
* @param holdDDLTime
+ * @param creationQuery
* @return a list of strings with the dynamic partition paths
* @throws HiveException
*/
public ArrayList<LinkedHashMap<String, String>> loadDynamicPartitions(Path loadPath,
String tableName, Map<String, String> partSpec, boolean replace,
- int numDP, boolean holdDDLTime)
+ int numDP, boolean holdDDLTime, String creationQuery)
throws HiveException {
Set<Path> validPartitions = new HashSet<Path>();
@@ -1229,7 +1232,7 @@
fullPartSpecs.add(fullPartSpec);
// finally load the partition -- move the file to the final table address
- loadPartition(partPath, tableName, fullPartSpec, replace, holdDDLTime, true);
+ loadPartition(partPath, tableName, fullPartSpec, replace, holdDDLTime, true, creationQuery);
LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
}
return fullPartSpecs;
@@ -1251,9 +1254,10 @@
* @param replace
* if true - replace files in the table, otherwise add files to table
* @param holdDDLTime
+ * @param creationQuery
*/
public void loadTable(Path loadPath, String tableName, boolean replace,
- boolean holdDDLTime) throws HiveException {
+ boolean holdDDLTime, String creationQuery) throws HiveException {
Table tbl = getTable(tableName);
if (replace) {
@@ -1262,6 +1266,8 @@
tbl.copyFiles(loadPath);
}
+ tbl.getTTable().getSd().setCreationQuery(creationQuery);
+
if (!holdDDLTime) {
try {
alterTable(tableName, tbl);
@@ -1285,7 +1291,7 @@
public Partition createPartition(Table tbl, Map<String, String> partSpec)
throws HiveException {
return createPartition(tbl, partSpec, null, null, null, null, -1,
- null, null, null, null, null);
+ null, null, null, null, null, null);
}
/**
@@ -1316,7 +1322,7 @@
Path location, Map<String, String> partParams, String inputFormat, String outputFormat,
int numBuckets, List<FieldSchema> cols,
String serializationLib, Map<String, String> serdeParams,
- List<String> bucketCols, List<Order> sortCols) throws HiveException {
+ List<String> bucketCols, List<Order> sortCols, String creationQuery) throws HiveException {
org.apache.hadoop.hive.metastore.api.Partition partition = null;
@@ -1361,6 +1367,9 @@
if (sortCols != null) {
inPart.getSd().setSortCols(sortCols);
}
+ if (creationQuery != null) {
+ inPart.getSd().setCreationQuery(creationQuery);
+ }
partition = getMSC().add_partition(inPart);
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
@@ -1372,7 +1381,7 @@
public Partition getPartition(Table tbl, Map<String, String> partSpec,
boolean forceCreate) throws HiveException {
- return getPartition(tbl, partSpec, forceCreate, null, true);
+ return getPartition(tbl, partSpec, forceCreate, null, true, null);
}
/**
@@ -1387,11 +1396,13 @@
* created
* @param partPath the path where the partition data is located
* @param inheritTableSpecs whether to copy over the table specs for if/of/serde
+ * @param creationQuery
* @return result partition object or null if there is no partition
* @throws HiveException
*/
public Partition getPartition(Table tbl, Map<String, String> partSpec,
- boolean forceCreate, String partPath, boolean inheritTableSpecs) throws HiveException {
+ boolean forceCreate, String partPath, boolean inheritTableSpecs,
+ String creationQuery) throws HiveException {
if (!tbl.isValidSpec(partSpec)) {
throw new HiveException("Invalid partition: " + partSpec);
}
@@ -1425,7 +1436,10 @@
if (tpart == null) {
LOG.debug("creating partition for table " + tbl.getTableName()
+ " with partition spec : " + partSpec);
- tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
+ tpart = getMSC().getPartitionTemplate(tbl.getDbName(), tbl.getTableName(), pvals);
+ tpart.getSd().setCreationQuery(creationQuery);
+ getMSC().add_partition(tpart);
}
else {
LOG.debug("altering partition for table " + tbl.getTableName()
@@ -1439,6 +1453,9 @@
throw new HiveException("new partition path should not be null or empty.");
}
tpart.getSd().setLocation(partPath);
+ if (creationQuery != null) {
+ tpart.getSd().setCreationQuery(creationQuery);
+ }
String fullName = tbl.getTableName();
if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
fullName = tbl.getDbName() + "." + tbl.getTableName();
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (revision 1350406)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (working copy)
@@ -91,6 +91,7 @@
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.QueryPlan;
@@ -2317,4 +2318,8 @@
public static Class getBuiltinUtilsClass() throws ClassNotFoundException {
return Class.forName("org.apache.hive.builtins.BuiltinUtils");
}
+
+ /**
+ * Records the current session's command string as the creation query on the
+ * given storage descriptor.
+ */
+ public static void setCreationQuery(StorageDescriptor sd) {
+ sd.setCreationQuery(SessionState.get().getCmd());
+ }
}
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1350406)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -141,6 +141,7 @@
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.security.authorization.Privilege;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -853,6 +854,8 @@
validateSerDe(crtIndex.getSerde());
}
+
+
db
.createIndex(
crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(),
@@ -860,7 +863,8 @@
crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(),
crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(),
crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(),
- crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment()
+ crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment(),
+ SessionState.get().getCmd()
);
if (HiveUtils.getIndexHandler(conf, crtIndex.getIndexTypeHandlerClass()).usesIndexTable()) {
String indexTableName =
@@ -882,6 +886,7 @@
switch(alterIndex.getOp()) {
case ADDPROPS:
idx.getParameters().putAll(alterIndex.getProps());
+ Utilities.setCreationQuery(idx.getSd());
break;
case UPDATETIMESTAMP:
try {
@@ -992,7 +997,8 @@
addPartitionDesc.getSerializationLib(),
addPartitionDesc.getSerdeParams(),
addPartitionDesc.getBucketCols(),
- addPartitionDesc.getSortCols());
+ addPartitionDesc.getSortCols(),
+ SessionState.get().getCmd());
} else {
if (tbl.isView()) {
@@ -1008,7 +1014,8 @@
addPartitionDesc.getSerializationLib(),
addPartitionDesc.getSerdeParams(),
addPartitionDesc.getBucketCols(),
- addPartitionDesc.getSortCols());
+ addPartitionDesc.getSortCols(),
+ SessionState.get().getCmd());
}
Partition part = db
@@ -2975,6 +2982,9 @@
if (part == null) {
db.alterTable(alterTbl.getOldName(), tbl);
} else {
+
+ Utilities.setCreationQuery(part.getTPartition().getSd());
+
db.alterPartition(tbl.getTableName(), part);
}
} catch (InvalidOperationException e) {
@@ -3393,6 +3403,8 @@
return rc;
}
+ Utilities.setCreationQuery(tbl.getTTable().getSd());
+
// create the table
db.createTable(tbl, crtTbl.getIfNotExists());
work.getOutputs().add(new WriteEntity(tbl));
@@ -3481,6 +3493,8 @@
}
}
+ Utilities.setCreationQuery(tbl.getTTable().getSd());
+
// reset owner and creation time
int rc = setGenericTableAttributes(tbl);
if (rc != 0) {
@@ -3535,6 +3549,9 @@
}
oldview.setPartCols(crtView.getPartCols());
oldview.checkValidity();
+
+ Utilities.setCreationQuery(oldview.getTTable().getSd());
+
try {
db.alterTable(crtView.getViewName(), oldview);
} catch (InvalidOperationException e) {
@@ -3566,6 +3583,8 @@
return rc;
}
+ Utilities.setCreationQuery(tbl.getTTable().getSd());
+
db.createTable(tbl, crtView.getIfNotExists());
work.getOutputs().add(new WriteEntity(tbl));
}
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (revision 1350406)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (working copy)
@@ -37,8 +37,8 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -232,7 +232,7 @@
if (tbd.getPartitionSpec().size() == 0) {
dc = new DataContainer(table.getTTable());
db.loadTable(new Path(tbd.getSourceDir()), tbd.getTable()
- .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime());
+ .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime(), SessionState.get().getCmd());
if (work.getOutputs() != null) {
work.getOutputs().add(new WriteEntity(table, true));
}
@@ -263,7 +263,8 @@
tbd.getPartitionSpec(),
tbd.getReplace(),
dpCtx.getNumDPCols(),
- tbd.getHoldDDLTime());
+ tbd.getHoldDDLTime(),
+ SessionState.get().getCmd());
if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
throw new HiveException("This query creates no partitions." +
@@ -302,7 +303,8 @@
dc = null; // reset data container to prevent it being added again.
} else { // static partitions
db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
- tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(), tbd.getInheritTableSpecs());
+ tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
+ tbd.getInheritTableSpecs(), SessionState.get().getCmd());
Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
dc = new DataContainer(table.getTTable(), partn.getTPartition());
// add this partition to post-execution hook