Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1309029)
+++ conf/hive-default.xml.template (working copy)
@@ -298,6 +298,12 @@
</property>

<property>
+ <name>hive.metastore.batch.retrieve.table.partition.max</name>
+ <value>1000</value>
+ <description>Maximum number of table partitions that metastore internally retrieves in one batch.</description>
+ </property>
+
+ <property>
<name>hive.default.fileformat</name>
<value>TextFile</value>
<description>Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 1309029)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -437,6 +437,119 @@
return part4;
}
+ public void testListPartitions() throws Throwable {
+ // create a table with multiple partitions
+ String dbName = "compdb";
+ String tblName = "comptbl";
+ String typeName = "Person";
+
+ cleanUp(dbName, tblName, typeName);
+
+ List<List<String>> values = new ArrayList<List<String>>();
+ values.add(makeVals("2008-07-01 14:13:12", "14"));
+ values.add(makeVals("2008-07-01 14:13:12", "15"));
+ values.add(makeVals("2008-07-02 14:13:12", "15"));
+ values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+ createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+ List<Partition> partitions = client.listPartitions(dbName, tblName, (short)-1);
+ assertNotNull("should have returned partitions", partitions);
+ assertEquals(" should have returned " + values.size() +
+ " partitions", values.size(), partitions.size());
+
+ partitions = client.listPartitions(dbName, tblName, (short)(values.size()/2));
+
+ assertNotNull("should have returned partitions", partitions);
+ assertEquals(" should have returned " + values.size() / 2 +
+ " partitions",values.size() / 2, partitions.size());
+
+
+ partitions = client.listPartitions(dbName, tblName, (short) (values.size() * 2));
+
+ assertNotNull("should have returned partitions", partitions);
+ assertEquals(" should have returned " + values.size() +
+ " partitions",values.size(), partitions.size());
+
+ cleanUp(dbName, tblName, typeName);
+
+ }
+
+
+
+ public void testListPartitionNames() throws Throwable {
+ // create a table with multiple partitions
+ String dbName = "compdb";
+ String tblName = "comptbl";
+ String typeName = "Person";
+
+ cleanUp(dbName, tblName, typeName);
+
+ List<List<String>> values = new ArrayList<List<String>>();
+ values.add(makeVals("2008-07-01 14:13:12", "14"));
+ values.add(makeVals("2008-07-01 14:13:12", "15"));
+ values.add(makeVals("2008-07-02 14:13:12", "15"));
+ values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+
+
+ createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+ List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+ assertNotNull("should have returned partitions", partitions);
+ assertEquals(" should have returned " + values.size() +
+ " partitions", values.size(), partitions.size());
+
+ partitions = client.listPartitionNames(dbName, tblName, (short)(values.size()/2));
+
+ assertNotNull("should have returned partitions", partitions);
+ assertEquals(" should have returned " + values.size() / 2 +
+ " partitions",values.size() / 2, partitions.size());
+
+
+ partitions = client.listPartitionNames(dbName, tblName, (short) (values.size() * 2));
+
+ assertNotNull("should have returned partitions", partitions);
+ assertEquals(" should have returned " + values.size() +
+ " partitions",values.size(), partitions.size());
+
+ cleanUp(dbName, tblName, typeName);
+
+ }
+
+
+ public void testDropTable() throws Throwable {
+ // create a table with multiple partitions
+ String dbName = "compdb";
+ String tblName = "comptbl";
+ String typeName = "Person";
+
+ cleanUp(dbName, tblName, typeName);
+
+ List<List<String>> values = new ArrayList<List<String>>();
+ values.add(makeVals("2008-07-01 14:13:12", "14"));
+ values.add(makeVals("2008-07-01 14:13:12", "15"));
+ values.add(makeVals("2008-07-02 14:13:12", "15"));
+ values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+ createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+ client.dropTable(dbName, tblName);
+ client.dropType(typeName);
+
+ boolean exceptionThrown = false;
+ try {
+ client.getTable(dbName, tblName);
+ } catch(Exception e) {
+ assertEquals("table should not have existed",
+ NoSuchObjectException.class, e.getClass());
+ exceptionThrown = true;
+ }
+ assertTrue("Table " + tblName + " should have been dropped ", exceptionThrown);
+
+ }
+
+
public void testAlterPartition() throws Throwable {
try {
@@ -1670,11 +1783,9 @@
vals3.add("p12");
vals3.add("p21");
- silentDropDatabase(dbName);
+ cleanUp(dbName, tblName, null);
- Database db = new Database();
- db.setName(dbName);
- client.createDatabase(db);
+ createDb(dbName);
ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
@@ -1684,26 +1795,16 @@
partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
+ Map<String, String> serdParams = new HashMap<String, String>();
+ serdParams.put(Constants.SERIALIZATION_FORMAT, "1");
+ StorageDescriptor sd = createStorageDescriptor(tblName, partCols, null, serdParams);
+
Table tbl = new Table();
tbl.setDbName(dbName);
tbl.setTableName(tblName);
- StorageDescriptor sd = new StorageDescriptor();
tbl.setSd(sd);
- sd.setCols(cols);
- sd.setCompressed(false);
- sd.setNumBuckets(1);
- sd.setParameters(new HashMap<String, String>());
- sd.setBucketCols(new ArrayList<String>());
- sd.setSerdeInfo(new SerDeInfo());
- sd.getSerdeInfo().setName(tbl.getTableName());
- sd.getSerdeInfo().setParameters(new HashMap<String, String>());
- sd.getSerdeInfo().getParameters()
- .put(Constants.SERIALIZATION_FORMAT, "1");
- sd.setSortCols(new ArrayList<Order>());
-
tbl.setPartitionKeys(partCols);
client.createTable(tbl);
-
tbl = client.getTable(dbName, tblName);
add_partition(client, tbl, vals, "part1");
@@ -1719,8 +1820,7 @@
checkFilter(client, dbName, tblName, "p2 like \"p2.*\"", 3);
checkFilter(client, dbName, tblName, "p2 like \"p.*2\"", 1);
- client.dropTable(dbName, tblName);
- client.dropDatabase(dbName);
+ cleanUp(dbName, tblName, null);
}
private void checkFilter(HiveMetaStoreClient client, String dbName,
@@ -1891,48 +1991,34 @@
}
}
- private Table createTableForTestFilter(String dbName, String tableName, String owner, int lastAccessTime, boolean hasSecondParam) throws Exception {
- client.dropTable(dbName, tableName);
+ private Table createTableForTestFilter(String dbName, String tableName, String owner,
+ int lastAccessTime, boolean hasSecondParam) throws Exception {
ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
- Table tbl = new Table();
- tbl.setDbName(dbName);
- tbl.setTableName(tableName);
- tbl.setParameters(new HashMap<String, String>());
- tbl.getParameters().put("test_param_1", "hi");
- if (hasSecondParam) {
- tbl.getParameters().put("test_param_2", "50");
- }
- StorageDescriptor sd = new StorageDescriptor();
- tbl.setSd(sd);
- sd.setCols(cols);
- sd.setCompressed(false);
- sd.setNumBuckets(1);
- sd.setParameters(new HashMap<String, String>());
- sd.getParameters().put("sd_param_1", "Use this for comments etc");
- sd.setBucketCols(new ArrayList<String>(2));
- sd.getBucketCols().add("name");
- sd.setSerdeInfo(new SerDeInfo());
- sd.getSerdeInfo().setName(tbl.getTableName());
- sd.getSerdeInfo().setParameters(new HashMap<String, String>());
- sd.getSerdeInfo().getParameters()
- .put(Constants.SERIALIZATION_FORMAT, "1");
- sd.setSortCols(new ArrayList<Order>());
+ Map<String, String> params = new HashMap<String, String>();
+ params.put("sd_param_1", "Use this for comments etc");
- tbl.setOwner(owner);
- tbl.setLastAccessTime(lastAccessTime);
+ Map<String, String> serdParams = new HashMap<String, String>();
+ serdParams.put(Constants.SERIALIZATION_FORMAT, "1");
- tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
- tbl.getPartitionKeys().add(
- new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
- tbl.getPartitionKeys().add(
- new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+ StorageDescriptor sd = createStorageDescriptor(tableName, cols, params, serdParams);
- client.createTable(tbl);
+ Map<String, String> partitionKeys = new HashMap<String, String>();
+ partitionKeys.put("ds", Constants.STRING_TYPE_NAME);
+ partitionKeys.put("hr", Constants.INT_TYPE_NAME);
+ Map<String, String> tableParams = new HashMap<String, String>();
+ tableParams.put("test_param_1", "hi");
+ if(hasSecondParam) {
+ tableParams.put("test_param_2", "50");
+ }
+
+ Table tbl = createTable(dbName, tableName, owner, tableParams,
+ partitionKeys, sd, lastAccessTime);
+
if (isThriftClient) {
// the createTable() above does not update the location in the 'tbl'
// object when the client is a thrift client and the code below relies
@@ -1942,7 +2028,6 @@
}
return tbl;
}
-
/**
* Verify that if another client, either a metastore Thrift server or a Hive CLI instance
* renames a table recently created by this instance, and hence potentially in its cache, the
@@ -1955,37 +2040,24 @@
String renameTblName = "rename_concurrenttbl";
try {
- client.dropTable(dbName, tblName);
- silentDropDatabase(dbName);
+ cleanUp(dbName, tblName, null);
- Database db = new Database();
- db.setName(dbName);
- client.createDatabase(db);
+ createDb(dbName);
ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
- Table tbl = new Table();
- tbl.setDbName(dbName);
- tbl.setTableName(tblName);
- StorageDescriptor sd = new StorageDescriptor();
- tbl.setSd(sd);
- sd.setCols(cols);
- sd.setCompressed(false);
- sd.setNumBuckets(1);
- sd.setParameters(new HashMap<String, String>());
- sd.getParameters().put("test_param_1", "Use this for comments etc");
- sd.setBucketCols(new ArrayList<String>(2));
- sd.getBucketCols().add("name");
- sd.setSerdeInfo(new SerDeInfo());
- sd.getSerdeInfo().setName(tbl.getTableName());
- sd.getSerdeInfo().setParameters(new HashMap<String, String>());
- sd.getSerdeInfo().getParameters().put(
- org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
+ Map<String, String> params = new HashMap<String, String>();
+ params.put("test_param_1", "Use this for comments etc");
- client.createTable(tbl);
+ Map<String, String> serdParams = new HashMap<String, String>();
+ serdParams.put(Constants.SERIALIZATION_FORMAT, "1");
+ StorageDescriptor sd = createStorageDescriptor(tblName, cols, params, serdParams);
+
+ createTable(dbName, tblName, null, null, null, sd, 0);
+
// get the table from the client, verify the name is correct
Table tbl2 = client.getTable(dbName, tblName);
@@ -2042,4 +2114,146 @@
Utilities.executeWithRetry(execUpdate, updateStmt, interval, attempts);
}
+
+ private void cleanUp(String dbName, String tableName, String typeName) throws Exception {
+ if(dbName != null && tableName != null) {
+ client.dropTable(dbName, tableName);
+ }
+ if(dbName != null) {
+ silentDropDatabase(dbName);
+ }
+ if(typeName != null) {
+ client.dropType(typeName);
+ }
+ }
+
+ private Database createDb(String dbName) throws Exception {
+ if(null == dbName) { return null; }
+ Database db = new Database();
+ db.setName(dbName);
+ client.createDatabase(db);
+ return db;
+ }
+
+ private Type createType(String typeName, Map<String, String> fields) throws Throwable {
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<FieldSchema>(fields.size()));
+ for(String fieldName : fields.keySet()) {
+ typ1.getFields().add(
+ new FieldSchema(fieldName, fields.get(fieldName), ""));
+ }
+ client.createType(typ1);
+ return typ1;
+ }
+
+ private Table createTable(String dbName, String tblName, String owner,
+ Map<String, String> tableParams, Map<String, String> partitionKeys,
+ StorageDescriptor sd, int lastAccessTime) throws Exception {
+ Table tbl = new Table();
+ tbl.setDbName(dbName);
+ tbl.setTableName(tblName);
+ if(tableParams != null) {
+ tbl.setParameters(tableParams);
+ }
+
+ if(owner != null) {
+ tbl.setOwner(owner);
+ }
+
+ if(partitionKeys != null) {
+ tbl.setPartitionKeys(new ArrayList<FieldSchema>(partitionKeys.size()));
+ for(String key : partitionKeys.keySet()) {
+ tbl.getPartitionKeys().add(
+ new FieldSchema(key, partitionKeys.get(key), ""));
+ }
+ }
+
+ tbl.setSd(sd);
+ tbl.setLastAccessTime(lastAccessTime);
+
+ client.createTable(tbl);
+ return tbl;
+ }
+
+ private StorageDescriptor createStorageDescriptor(String tableName,
+ List<FieldSchema> cols, Map<String, String> params, Map<String, String> serdParams) {
+ StorageDescriptor sd = new StorageDescriptor();
+
+ sd.setCols(cols);
+ sd.setCompressed(false);
+ sd.setNumBuckets(1);
+ sd.setParameters(params);
+ sd.setBucketCols(new ArrayList<String>(2));
+ sd.getBucketCols().add("name");
+ sd.setSerdeInfo(new SerDeInfo());
+ sd.getSerdeInfo().setName(tableName);
+ sd.getSerdeInfo().setParameters(serdParams);
+ sd.getSerdeInfo().getParameters()
+ .put(Constants.SERIALIZATION_FORMAT, "1");
+ sd.setSortCols(new ArrayList<Order>());
+
+ return sd;
+ }
+
+ private List<Partition> createPartitions(String dbName, Table tbl,
+ List<List<String>> values) throws Throwable {
+ int i = 1;
+ List<Partition> partitions = new ArrayList<Partition>();
+ for(List<String> vals : values) {
+ Partition part = makePartitionObject(dbName, tbl.getTableName(), vals, tbl, "/part"+i);
+ i++;
+ // check if the partition exists (it shouldn't)
+ boolean exceptionThrown = false;
+ try {
+ Partition p = client.getPartition(dbName, tbl.getTableName(), vals);
+ } catch(Exception e) {
+ assertEquals("partition should not have existed",
+ NoSuchObjectException.class, e.getClass());
+ exceptionThrown = true;
+ }
+ assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
+ Partition retp = client.add_partition(part);
+ assertNotNull("Unable to create partition " + part, retp);
+ partitions.add(retp);
+ }
+ return partitions;
+ }
+
+ private void createMultiPartitionTableSchema(String dbName, String tblName,
+ String typeName, List<List<String>> values)
+ throws Throwable, MetaException, TException, NoSuchObjectException {
+ createDb(dbName);
+
+ Map<String, String> fields = new HashMap<String, String>();
+ fields.put("name", Constants.STRING_TYPE_NAME);
+ fields.put("income", Constants.INT_TYPE_NAME);
+
+ Type typ1 = createType(typeName, fields);
+
+ Map<String, String> partitionKeys = new HashMap<String, String>();
+ partitionKeys.put("ds", Constants.STRING_TYPE_NAME);
+ partitionKeys.put("hr", Constants.STRING_TYPE_NAME);
+
+ Map<String, String> params = new HashMap<String, String>();
+ params.put("test_param_1", "Use this for comments etc");
+
+ Map<String, String> serdParams = new HashMap<String, String>();
+ serdParams.put(Constants.SERIALIZATION_FORMAT, "1");
+
+ StorageDescriptor sd = createStorageDescriptor(tblName, typ1.getFields(), params, serdParams);
+
+ Table tbl = createTable(dbName, tblName, null, null, partitionKeys, sd, 0);
+
+ if (isThriftClient) {
+ // the createTable() above does not update the location in the 'tbl'
+ // object when the client is a thrift client and the code below relies
+ // on the location being present in the 'tbl' object - so get the table
+ // from the metastore
+ tbl = client.getTable(dbName, tblName);
+ }
+
+ createPartitions(dbName, tbl, values);
+ }
+
}
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (revision 1309029)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (working copy)
@@ -161,7 +161,7 @@
}
// also the location field in partition
- List<Partition> parts = msdb.getPartitions(dbname, name, 0);
+ List<Partition> parts = msdb.getPartitions(dbname, name, -1);
for (Partition part : parts) {
String oldPartLoc = part.getSd().getLocation();
Path oldPartLocPath = new Path(oldPartLoc);
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1309029)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -54,7 +54,6 @@
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.RegionStorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -72,6 +71,7 @@
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.RegionStorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -80,7 +80,6 @@
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.model.MRegionStorageDescriptor;
import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
import org.apache.hadoop.hive.metastore.model.MDatabase;
@@ -92,6 +91,7 @@
import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRegionStorageDescriptor;
import org.apache.hadoop.hive.metastore.model.MRole;
import org.apache.hadoop.hive.metastore.model.MRoleMap;
import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
@@ -705,10 +705,16 @@
pm.deletePersistentAll(partColGrants);
}
+ int partitionBatchSize = HiveConf.getIntVar(getConf(),
+ ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
+
// call dropPartition on each of the table's partitions to follow the
// procedure for cleanly dropping partitions.
- List<MPartition> partsToDelete = listMPartitions(dbName, tableName, -1);
- if (partsToDelete != null) {
+ while(true) {
+ List<MPartition> partsToDelete = listMPartitions(dbName, tableName, partitionBatchSize);
+ if (partsToDelete == null || partsToDelete.isEmpty()) {
+ break;
+ }
for (MPartition mpart : partsToDelete) {
dropPartitionCommon(mpart);
}
@@ -1296,20 +1302,19 @@
public List<Partition> getPartitions(String dbName, String tableName, int max)
throws MetaException {
openTransaction();
- List<Partition> parts = convertToParts(listMPartitions(dbName, tableName,
- max));
+ List<Partition> parts = convertToParts(listMPartitions(dbName, tableName, max));
commitTransaction();
return parts;
}
@Override
public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
- short maxParts, String userName, List<String> groupNames)
+ short max, String userName, List<String> groupNames)
throws MetaException, NoSuchObjectException, InvalidObjectException {
boolean success = false;
try {
openTransaction();
- List<MPartition> mparts = listMPartitions(dbName, tblName, maxParts);
+ List<MPartition> mparts = listMPartitions(dbName, tblName, max);
List<Partition> parts = new ArrayList<Partition>(mparts.size());
if (mparts != null && mparts.size()>0) {
for (MPartition mpart : mparts) {
@@ -1403,6 +1408,10 @@
+ "order by partitionName asc");
q.declareParameters("java.lang.String t1, java.lang.String t2");
q.setResult("partitionName");
+
+ if(max > 0) {
+ q.setRange(0, max);
+ }
Collection names = (Collection) q.execute(dbName, tableName);
for (Iterator i = names.iterator(); i.hasNext();) {
pns.add((String) i.next());
@@ -1538,6 +1547,7 @@
// TODO:pc implement max
private List<MPartition> listMPartitions(String dbName, String tableName,
int max) {
+
boolean success = false;
List<MPartition> mparts = null;
try {
@@ -1549,6 +1559,9 @@
"table.tableName == t1 && table.database.name == t2");
query.declareParameters("java.lang.String t1, java.lang.String t2");
query.setOrdering("partitionName ascending");
+ if(max > 0) {
+ query.setRange(0, max);
+ }
mparts = (List<MPartition>) query.execute(tableName, dbName);
LOG.debug("Done executing query for listMPartitions");
pm.retrieveAll(mparts);
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1309029)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -122,6 +122,7 @@
HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
+ HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX,
HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
};
@@ -289,6 +290,8 @@
METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus"),
METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG"),
METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300),
+ METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX(
+ "hive.metastore.batch.retrieve.table.partition.max", 1000),
METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", ""),
METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", ""),
// should we do checks against the storage (usually hdfs) for operations like drop_partition
@@ -312,6 +315,8 @@
METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", ""),
METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties",""),
+
+
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false),
CLIPRINTCURRENTDB("hive.cli.print.current.db", false),
Index: ql/src/test/results/clientnegative/drop_table_failure3.q.out
===================================================================
--- ql/src/test/results/clientnegative/drop_table_failure3.q.out (revision 0)
+++ ql/src/test/results/clientnegative/drop_table_failure3.q.out (revision 0)
@@ -0,0 +1,49 @@
+PREHOOK: query: create database dtf3
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database dtf3
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: use dtf3
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use dtf3
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: create table drop_table_failure_temp(col STRING) partitioned by (p STRING)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table drop_table_failure_temp(col STRING) partitioned by (p STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: dtf3@drop_table_failure_temp
+PREHOOK: query: alter table drop_table_failure_temp add partition (p ='p1')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: query: alter table drop_table_failure_temp add partition (p ='p1')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: Output: dtf3@drop_table_failure_temp@p=p1
+PREHOOK: query: alter table drop_table_failure_temp add partition (p ='p2')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: query: alter table drop_table_failure_temp add partition (p ='p2')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: Output: dtf3@drop_table_failure_temp@p=p2
+PREHOOK: query: alter table drop_table_failure_temp add partition (p ='p3')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: query: alter table drop_table_failure_temp add partition (p ='p3')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: Output: dtf3@drop_table_failure_temp@p=p3
+PREHOOK: query: alter table drop_table_failure_temp partition (p ='p3') ENABLE NO_DROP
+PREHOOK: type: ALTERPARTITION_PROTECTMODE
+PREHOOK: Input: dtf3@drop_table_failure_temp
+PREHOOK: Output: dtf3@drop_table_failure_temp@p=p3
+POSTHOOK: query: alter table drop_table_failure_temp partition (p ='p3') ENABLE NO_DROP
+POSTHOOK: type: ALTERPARTITION_PROTECTMODE
+POSTHOOK: Input: dtf3@drop_table_failure_temp
+POSTHOOK: Input: dtf3@drop_table_failure_temp@p=p3
+POSTHOOK: Output: dtf3@drop_table_failure_temp@p=p3
+PREHOOK: query: drop table drop_table_failure_temp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: dtf3@drop_table_failure_temp
+PREHOOK: Output: dtf3@drop_table_failure_temp
+FAILED: Error in metadata: Table drop_table_failure_temp Partitionp=p3 is protected from being dropped
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask
Index: ql/src/test/results/clientpositive/drop_table2.q.out
===================================================================
--- ql/src/test/results/clientpositive/drop_table2.q.out (revision 0)
+++ ql/src/test/results/clientpositive/drop_table2.q.out (revision 0)
@@ -0,0 +1,58 @@
+PREHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@temp
+PREHOOK: query: alter table temp add if not exists partition (p ='p1')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@temp
+POSTHOOK: query: alter table temp add if not exists partition (p ='p1')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@temp
+POSTHOOK: Output: default@temp@p=p1
+PREHOOK: query: alter table temp add if not exists partition (p ='p2')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@temp
+POSTHOOK: query: alter table temp add if not exists partition (p ='p2')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@temp
+POSTHOOK: Output: default@temp@p=p2
+PREHOOK: query: alter table temp add if not exists partition (p ='p3')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@temp
+POSTHOOK: query: alter table temp add if not exists partition (p ='p3')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@temp
+POSTHOOK: Output: default@temp@p=p3
+PREHOOK: query: show partitions temp
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions temp
+POSTHOOK: type: SHOWPARTITIONS
+p=p1
+p=p2
+p=p3
+PREHOOK: query: drop table temp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@temp
+PREHOOK: Output: default@temp
+POSTHOOK: query: drop table temp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@temp
+POSTHOOK: Output: default@temp
+PREHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table if not exists temp(col STRING) partitioned by (p STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@temp
+PREHOOK: query: show partitions temp
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: show partitions temp
+POSTHOOK: type: SHOWPARTITIONS
+PREHOOK: query: drop table temp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@temp
+PREHOOK: Output: default@temp
+POSTHOOK: query: drop table temp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@temp
+POSTHOOK: Output: default@temp
Index: ql/src/test/queries/clientnegative/drop_table_failure3.q
===================================================================
--- ql/src/test/queries/clientnegative/drop_table_failure3.q (revision 0)
+++ ql/src/test/queries/clientnegative/drop_table_failure3.q (revision 0)
@@ -0,0 +1,12 @@
+create database dtf3;
+use dtf3;
+
+create table drop_table_failure_temp(col STRING) partitioned by (p STRING);
+
+alter table drop_table_failure_temp add partition (p ='p1');
+alter table drop_table_failure_temp add partition (p ='p2');
+alter table drop_table_failure_temp add partition (p ='p3');
+
+alter table drop_table_failure_temp partition (p ='p3') ENABLE NO_DROP;
+
+drop table drop_table_failure_temp;
Index: ql/src/test/queries/clientpositive/drop_table2.q
===================================================================
--- ql/src/test/queries/clientpositive/drop_table2.q (revision 0)
+++ ql/src/test/queries/clientpositive/drop_table2.q (revision 0)
@@ -0,0 +1,15 @@
+SET hive.metastore.batch.retrieve.max=1;
+create table if not exists temp(col STRING) partitioned by (p STRING);
+alter table temp add if not exists partition (p ='p1');
+alter table temp add if not exists partition (p ='p2');
+alter table temp add if not exists partition (p ='p3');
+
+show partitions temp;
+
+drop table temp;
+
+create table if not exists temp(col STRING) partitioned by (p STRING);
+
+show partitions temp;
+
+drop table temp;
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1309029)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -3171,15 +3171,24 @@
" is protected from being dropped");
}
+ int partitionBatchSize = HiveConf.getIntVar(conf,
+ ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX);
+
// We should check that all the partitions of the table can be dropped
if (tbl != null && tbl.isPartitioned()) {
- List<Partition> listPartitions = db.getPartitions(tbl);
- for (Partition p: listPartitions) {
+ List<String> partitionNames = db.getPartitionNames(tbl.getTableName(), (short)-1);
+
+ for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) {
+ List<String> partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize,
+ partitionNames.size()));
+ List<Partition> listPartitions = db.getPartitionsByNames(tbl, partNames);
+ for (Partition p: listPartitions) {
if (!p.canDrop()) {
throw new HiveException("Table " + tbl.getTableName() +
- " Partition" + p.getName() +
- " is protected from being dropped");
+ " Partition" + p.getName() +
+ " is protected from being dropped");
}
+ }
}
}