Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(revision 9522)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(working copy)
@@ -163,7 +163,7 @@
     tbl.getPartitionKeys().add(
         new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
     tbl.getPartitionKeys().add(
-        new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+        new FieldSchema("hr", Constants.STRING_TYPE_NAME, ""));
 
     client.createTable(tbl);
 
@@ -1062,9 +1062,9 @@
         "(p1=\"p13\" aNd p2=\"p24\")", 4);
     //test for and or precedence
     checkFilter(client, dbName, tblName,
-        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
     checkFilter(client, dbName, tblName,
-        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
 
     checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
     checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java	(working copy)
@@ -230,10 +230,37 @@
       if (e.getValue() == null || e.getValue().length() == 0) {
         throw new MetaException("Partition spec is incorrect. " + spec);
       }
+    }
+    return makePartName(spec, null);
+  }
+
+  /**
+   * Similar to makePartName(), but replaces null or zero length partition
+   * values with nullStr.
+   *
+   * @param spec
+   * @param nullStr
+   * @return
+   * @throws MetaException
+   */
+  public static String makePartName(Map<String, String> spec, String nullStr)
+      throws MetaException {
+    StringBuilder suffixBuf = new StringBuilder();
+    int i = 0;
+    for (Entry<String, String> e : spec.entrySet()) {
+      String value = e.getValue();
+      if (value == null || value.length() == 0) {
+        value = nullStr;
+      } else {
+        value = escapePathName(value);
+      }
+      if (i > 0) {
+        suffixBuf.append(Path.SEPARATOR);
+      }
       suffixBuf.append(escapePathName(e.getKey()));
       suffixBuf.append('=');
-      suffixBuf.append(escapePathName(e.getValue()));
-      suffixBuf.append(Path.SEPARATOR);
+      suffixBuf.append(value);
+      i++;
     }
     return suffixBuf.toString();
   }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java	(working copy)
@@ -106,6 +106,23 @@
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 
+  /**
+   * Returns only partition names that match the regex.
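+   * For example, with a table partitioned on (ds, hr), the regex
+   * "ds=2010-10-01/hr=.*" matches the names of all partitions that have
+   * ds=2010-10-01.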
+   * Note that the regex is limited to ".", ".*", and "(?i)", following the
+   * limitations of JDOQL's String.matches().
+   *
+   * @param db_name
+   * @param tbl_name
+   * @param regex
+   * @param max_parts
+   * @return
+   * @throws MetaException
+   */
+  public abstract List<String> listPartitionNames(String db_name,
+      String tbl_name, String regex, short max_parts) throws MetaException;
+
+  public abstract List<String> listPartitionNamesByFilter(String db_name,
+      String tbl_name, String filter, short max_parts) throws MetaException;
+
   public abstract void alterPartition(String db_name, String tbl_name,
       Partition new_part) throws InvalidObjectException, MetaException;
 
@@ -126,4 +143,7 @@
       String dbName, String tblName, String filter, short maxParts)
       throws MetaException, NoSuchObjectException;
 
+  public abstract List<Partition> getPartitionsByRegex(
+      String dbName, String tblName, String regex, short maxParts)
+      throws MetaException, NoSuchObjectException;
 }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -1743,63 +1743,128 @@
     }
 
     @Override
-    public List<Partition> get_partitions_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException,
+    public List<Partition> get_partitions_ps(final String db_name, final String tbl_name,
+        final List<String> part_vals, final short max_parts) throws MetaException,
         TException {
       incrementCounter("get_partitions_ps");
       logStartPartitionFunction("get_partitions_ps", db_name, tbl_name,
          part_vals);
-      List<Partition> parts = null;
-      List<Partition> matchingParts = new ArrayList<Partition>();
 
-      // This gets all the partitions and then filters based on the specified
-      // criteria. An alternative approach would be to get all the partition
-      // names, do the filtering on the names, and get the partition for each
-      // of the names. that match.
+      final String regex = makeRegexStringFromPartVals(db_name, tbl_name, part_vals);
+      List<Partition> ret = null;
       try {
-        parts = get_partitions(db_name, tbl_name, (short) -1);
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsByRegex(db_name, tbl_name, regex, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
       } catch (NoSuchObjectException e) {
+        ret = new ArrayList<Partition>();
+      } catch (Exception e) {
+        assert (e instanceof RuntimeException);
+        throw (RuntimeException) e;
+      }
+      return ret;
+    }
+
+    /**
+     * Given a list of partition values, create a regex that can be used for
+     * pruning based on the partition name.
+     *
+     * Example: t is partitioned on ds and hr. Calling this function with
+     * partVals = ["2010-10-01"] will return "ds=2010-10-01/hr=.*".
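+     * With partVals = ["2010-10-01", "12"], both keys are specified, so it
+     * will return the exact name "ds=2010-10-01/hr=12" with no wildcard.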
+     *
+     * @param dbName
+     * @param tblName
+     * @param partVals
+     * @return
+     * @throws MetaException
+     */
+    private String makeRegexStringFromPartVals(String dbName, String tblName,
+        List<String> partVals) throws MetaException {
+      Table t;
+      try {
+        t = get_table(dbName, tblName);
+      } catch (NoSuchObjectException e) {
         throw new MetaException(e.getMessage());
       }
 
-      for (Partition p : parts) {
-        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
-          matchingParts.add(p);
+      if (partVals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+
+      // Build up a spec, leaving all unspecified values empty
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i = 0;
+      for (FieldSchema fs : t.getPartitionKeys()) {
+        String value = "";
+        if (i < partVals.size()) {
+          value = partVals.get(i);
         }
+        partKeyToValues.put(fs.getName(), value);
+        i++;
       }
-
-      return matchingParts;
+      // Create a regex to match partition names by replacing every empty
+      // value with .*, which matches any string
+      return Warehouse.makePartName(partKeyToValues, ".*");
     }
 
     @Override
-    public List<String> get_partition_names_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException, TException {
+    public List<String> get_partition_names_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partition_names_ps");
       logStartPartitionFunction("get_partitions_names_ps", db_name, tbl_name,
           part_vals);
-      Table t;
+      final String regex = makeRegexStringFromPartVals(db_name, tbl_name, part_vals);
+      List<String> ret = null;
       try {
-        t = get_table(db_name, tbl_name);
-      } catch (NoSuchObjectException e) {
-        throw new MetaException(e.getMessage());
+        ret = executeWithRetry(new Command<List<String>>() {
+          @Override
+          List<String> run(RawStore ms) throws Exception {
+            return ms.listPartitionNames(db_name, tbl_name, regex, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert (e instanceof RuntimeException);
+        throw (RuntimeException) e;
      }
+      return ret;
 
-      List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
-      List<String> filteredPartNames = new ArrayList<String>();
-
-      for (String name : partNames) {
-        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
-        List<String> vals = new ArrayList<String>();
-        // Since we are iterating through a LinkedHashMap, iteration should
-        // return the partition values in the correct order for comparison.
-        for (String val : spec.values()) {
-          vals.add(val);
+      // Alternative implementation based on the filter parser, kept here
+      // for reference:
+      /*
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i = 0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
-        if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
-          filteredPartNames.add(name);
-        }
+        i++;
       }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
 
-      return filteredPartNames;
+      List<String> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<String>>() {
+          @Override
+          List<String> run(RawStore ms) throws Exception {
+            return ms.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert (e instanceof RuntimeException);
+        throw (RuntimeException) e;
+      }
+
+      return ret;
+      */
     }
 
     @Override
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hive.metastore;
 
-import java.io.ByteArrayInputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -41,7 +40,6 @@
 import org.antlr.runtime.CharStream;
 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.RecognitionException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
@@ -68,10 +66,9 @@
 import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 import org.apache.hadoop.hive.metastore.model.MTable;
 import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -954,6 +951,46 @@
     return mparts;
   }
 
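+  /**
+   * Lists MPartitions for the given table, optionally restricted to those
+   * whose partitionName matches nameRegex. The regex is evaluated with
+   * JDOQL's matches(), so only the limited pattern subset described in
+   * RawStore applies.
+   */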
+  private List<MPartition> listMPartitions(String dbName, String tableName,
+      String nameRegex, int max) {
+    boolean success = false;
+    List<MPartition> mparts = null;
+
+    String regexClause = null;
+    if (nameRegex == null || nameRegex.length() == 0) {
+      regexClause = "";
+    } else {
+      regexClause = " && partitionName.matches(t3)";
+    }
+
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitions");
+      dbName = dbName.toLowerCase().trim();
+      tableName = tableName.toLowerCase().trim();
+      Query query = pm.newQuery(MPartition.class,
+          "table.tableName == t1 && table.database.name == t2" + regexClause);
+      if (max >= 0) {
+        // Caller specified a row limit; set it on the query
+        query.setRange(0, max);
+      }
+
+      if (regexClause.length() == 0) {
+        query.declareParameters("java.lang.String t1, java.lang.String t2");
+        mparts = (List<MPartition>) query.execute(tableName, dbName);
+      } else {
+        query.declareParameters(
+            "java.lang.String t1, java.lang.String t2, java.lang.String t3");
+        mparts = (List<MPartition>) query.execute(tableName, dbName, nameRegex);
+      }
+
+      LOG.debug("Done executing query for listMPartitions");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitions");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
+  }
+
+  @Override
   public List<Partition> getPartitionsByFilter(String dbName, String tblName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException {
@@ -964,6 +1001,51 @@
     return parts;
   }
 
+  private String makeQueryFilterString(MTable mtable, String filter,
+      Map<String, String> params)
+      throws MetaException {
+    StringBuilder queryBuilder = new StringBuilder(
+        "table.tableName == t1 && table.database.name == t2");
+
+    if( filter != null ) {
+
+      Table table = convertToTable(mtable);
+
+      CharStream cs = new ANTLRNoCaseStringStream(filter);
+      FilterLexer lexer = new FilterLexer(cs);
+
+      CommonTokenStream tokens = new CommonTokenStream();
+      tokens.setTokenSource (lexer);
+
+      FilterParser parser = new FilterParser(tokens);
+
+      try {
+        parser.filter();
+      } catch(RecognitionException re) {
+        throw new MetaException("Error parsing partition filter : " + re);
+      }
+
+      String jdoFilter = parser.tree.generateJDOFilter(table, params);
+
+      if( jdoFilter.trim().length() > 0 ) {
+        queryBuilder.append(" && ( ");
+        queryBuilder.append(jdoFilter.trim());
+        queryBuilder.append(" )");
+      }
+    }
+
+    return queryBuilder.toString();
+  }
+
+  private String makeParameterDeclarationString(Map<String, String> params) {
+    // Create the parameter declaration string
+    StringBuilder paramDecl = new StringBuilder();
+    for( String key : params.keySet() ) {
+      paramDecl.append(", java.lang.String " + key);
+    }
+    return paramDecl.toString();
+  }
+
   private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException {
     boolean success = false;
@@ -976,80 +1058,102 @@
       MTable mtable = getMTable(dbName, tableName);
       if( mtable == null ) {
-        throw new NoSuchObjectException("Specified database/table does not exist : " + dbName + "." + tableName);
+        throw new NoSuchObjectException(
+            "Specified database/table does not exist : " + dbName + "." + tableName);
       }
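+      // makeQueryFilterString() binds a named parameter in params for each
+      // constant in the filter; t1/t2 (table and database name) are added
+      // below, before the query is executed.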
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      StringBuilder queryBuilder = new StringBuilder(
-          "table.tableName == t1 && table.database.name == t2");
+      Query query = pm.newQuery(MPartition.class,
+          queryFilterString);
 
-      Map<String, String> params = new HashMap<String, String>();
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
 
-      if( filter != null ) {
+      LOG.debug("Filter specified is " + filter + "," +
+          " JDOQL filter is " + queryFilterString);
 
-        Table table = convertToTable(mtable);
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
 
-        CharStream cs = new ANTLRNoCaseStringStream(filter);
-        FilterLexer lexer = new FilterLexer(cs);
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
+      query.setOrdering("partitionName ascending");
 
-        CommonTokenStream tokens = new CommonTokenStream();
-        tokens.setTokenSource (lexer);
+      mparts = (List<MPartition>) query.executeWithMap(params);
 
-        FilterParser parser = new FilterParser(tokens);
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
+  }
 
-        try {
-          parser.filter();
-        } catch(RecognitionException re) {
-          throw new MetaException("Error parsing partition filter : " + re);
-        }
+  @Override
+  public List<String> listPartitionNamesByFilter(String dbName, String tableName,
+      String filter, short maxParts) throws MetaException {
+    boolean success = false;
+    List<String> partNames = new ArrayList<String>();
+    try {
+      openTransaction();
+      LOG.debug("Executing listPartitionNamesByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
 
-        String jdoFilter = parser.tree.generateJDOFilter(table, params);
-
-        if( jdoFilter.trim().length() > 0 ) {
-          queryBuilder.append(" && ( ");
-          queryBuilder.append(jdoFilter.trim());
-          queryBuilder.append(" )");
-        }
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        // To be consistent with the behavior of listPartitionNames, if the
+        // table or db does not exist, we return an empty list
+        return partNames;
       }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      Query query = pm.newQuery(MPartition.class,
-          queryBuilder.toString());
+      Query query = pm.newQuery(
+          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where " + queryFilterString);
 
       if( maxParts >= 0 ) {
         //User specified a row limit, set it on the Query
         query.setRange(0, maxParts);
       }
 
-      //Create the parameter declaration string
-      StringBuilder paramDecl = new StringBuilder(
-          "java.lang.String t1, java.lang.String t2");
-      for(String key : params.keySet() ) {
-        paramDecl.append(", java.lang.String " + key);
-      }
-
       LOG.debug("Filter specified is " + filter + "," +
-          " JDOQL filter is " + queryBuilder.toString());
+          " JDOQL filter is " + queryFilterString);
 
       params.put("t1", tableName.trim());
       params.put("t2", dbName.trim());
 
-      query.declareParameters(paramDecl.toString());
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
       query.setOrdering("partitionName ascending");
+      query.setResult("partitionName");
 
-      mparts = (List<MPartition>) query.executeWithMap(params);
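+      // setResult("partitionName") turns this into a projection query that
+      // returns plain Strings, so no pm.retrieveAll() call is needed here.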
+      Collection names = (Collection) query.executeWithMap(params);
+      partNames = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        partNames.add((String) i.next());
+      }
 
-      LOG.debug("Done executing query for listMPartitionsByFilter");
-      pm.retrieveAll(mparts);
+      LOG.debug("Done executing query for listPartitionNamesByFilter");
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+      LOG.debug("Done retrieving all objects for listPartitionNamesByFilter");
     } finally {
       if (!success) {
        rollbackTransaction();
      }
     }
-    return mparts;
+    return partNames;
   }
-
   public void alterTable(String dbname, String name, Table newTable)
       throws InvalidObjectException, MetaException {
     boolean success = false;
@@ -1323,4 +1427,61 @@
     }
     return pns;
   }
+
+  @Override
+  public List<String> listPartitionNames(String dbName, String tableName,
+      String nameRegex, short max_parts) throws MetaException {
+    List<String> pns = new ArrayList<String>();
+
+    String regexClause = null;
+    if (nameRegex == null || nameRegex.length() == 0) {
+      regexClause = "";
+    } else {
+      regexClause = "&& partitionName.matches(t3) ";
+    }
+
+    boolean success = false;
+    try {
+      openTransaction();
+      LOG.debug("Executing listPartitionNames");
+      dbName = dbName.toLowerCase().trim();
+      tableName = tableName.toLowerCase().trim();
+      String queryStr =
+        "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+        + "where table.database.name == t1 && table.tableName == t2 " + regexClause
+        + "order by partitionName asc";
+      LOG.info("JDO Query string is " + queryStr);
+      Query q = pm.newQuery(queryStr);
+      q.setResult("partitionName");
+      if (max_parts >= 0) {
+        // Caller specified a row limit; set it on the query
+        q.setRange(0, max_parts);
+      }
+
+      Collection names = null;
+      if (regexClause.length() == 0) {
+        q.declareParameters("java.lang.String t1, java.lang.String t2");
+        names = (Collection) q.execute(dbName, tableName);
+      } else {
+        q.declareParameters(
+            "java.lang.String t1, java.lang.String t2, java.lang.String t3");
+        names = (Collection) q.execute(dbName, tableName, nameRegex);
+      }
+
+      pns = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        pns.add((String) i.next());
+      }
+      success = commitTransaction();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return pns;
+  }
+
+  @Override
+  public List<Partition> getPartitionsByRegex(String dbName, String tblName,
+      String regex, short maxParts) throws MetaException, NoSuchObjectException {
+    openTransaction();
+    List<Partition> parts = convertToParts(listMPartitions(dbName, tblName, regex,
+        maxParts));
+    commitTransaction();
+    return parts;
+  }
 }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(working copy)
@@ -767,4 +767,26 @@
     return TableType.INDEX_TABLE.toString().equals(table.getTableType());
   }
 
+  private static String escapeQuotes(String s) {
+    return s.replace("\"", "\\\"");
+  }
+
+  /**
+   * Given a map of partition column names to values, this creates a filter
+   * string that can be used for the *byFilter methods.
+   *
+   * For example, {ds=2010-10-01, hr=12} becomes ds="2010-10-01" and hr="12".
+   *
+   * @param m
+   * @return
+   */
+  public static String makeFilterStringFromMap(Map<String, String> m) {
+    StringBuilder filter = new StringBuilder();
+    for (Entry<String, String> e : m.entrySet()) {
+      String col = e.getKey();
+      // Escape any quotes in the value so the filter string stays parseable
+      String val = escapeQuotes(e.getValue());
+      if (filter.length() == 0) {
+        filter.append(col + "=\"" + val + "\"");
+      } else {
+        filter.append(" and " + col + "=\"" + val + "\"");
+      }
+    }
+    return filter.toString();
+  }
 }
Index: README.txt
===================================================================
--- README.txt	(revision 9522)
+++ README.txt	(working copy)
@@ -1,3 +1,4 @@
+
 How to Hive
 -----------
 
@@ -368,8 +369,8 @@
 3. In the package navigator, select the hive project, right-click
    and select Checkstyle > Activate Checkstyle. This will cause the
    checkstyle plugin to activate and analyze the project sources.
-
+
 Development Tips
 ------------------------
 
 * You may use the following line to test a specific testcase with a
   specific query file.
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java	(working copy)
@@ -31,12 +31,11 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
-import java.util.HashSet;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -64,11 +63,22 @@
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.*;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
+import org.apache.hadoop.hive.ql.metadata.CheckResult;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
@@ -83,22 +93,18 @@
 import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -2177,47 +2183,38 @@
         work.getOutputs().add(new WriteEntity(tbl));
       }
     } else {
-      // get all partitions of the table
-      List<String> partitionNames =
-        db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1);
-      Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
-      for (String partitionName : partitionNames) {
+      String dbName = db.getCurrentDatabase();
+      String tableName = dropTbl.getTableName();
+      // For each partition spec, find all the matching partition names
+      List<String> allMatchingNames = new ArrayList<String>();
+      for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
+        List<String> matchingNames = db.getPartitionNames(dbName,
+            tableName, partSpec, (short) -1);
+        allMatchingNames.addAll(matchingNames);
+        console.printInfo("Adding partitions " + matchingNames);
+      }
+
+      // Using the partition names, retrieve all the partitions
+      List<Partition> partitionsToDrop = new ArrayList<Partition>();
+      for (String partName : allMatchingNames) {
+        Map<String, String> spec = null;
         try {
-          partitions.add(Warehouse.makeSpecFromName(partitionName));
+          spec = Warehouse.makeSpecFromName(partName);
         } catch (MetaException e) {
-          LOG.warn("Unrecognized partition name from metastore: " + partitionName);
+          throw new HiveException(e);
         }
-      }
-      // drop partitions in the list
-      List<Partition> partsToDelete = new ArrayList<Partition>();
-      for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
-        Iterator<Map<String, String>> it = partitions.iterator();
-        while (it.hasNext()) {
-          Map<String, String> part = it.next();
-          // test if partSpec matches part
-          boolean match = true;
-          for (Map.Entry<String, String> item : partSpec.entrySet()) {
-            if (!item.getValue().equals(part.get(item.getKey()))) {
-              match = false;
-              break;
-            }
-          }
-          if (match) {
-            Partition p = db.getPartition(tbl, part, false);
-            if (!p.canDrop()) {
-              throw new HiveException("Table " + tbl.getTableName() +
-                  " Partition " + p.getName() +
-                  " is protected from being dropped");
-            }
-            partsToDelete.add(p);
-            it.remove();
-          }
+        Partition p = db.getPartition(tbl, spec, false);
+        if (!p.canDrop()) {
+          throw new HiveException("Table " + tbl.getTableName() +
+              " Partition " + p.getName() +
+              " is protected from being dropped");
         }
+        partitionsToDrop.add(p);
       }
 
       // drop all existing partitions from the list
-      for (Partition partition : partsToDelete) {
+      for (Partition partition : partitionsToDrop) {
         console.printInfo("Dropping the partition " + partition.getName());
         db.dropPartition(db.getCurrentDatabase(), dropTbl.getTableName(),
             partition.getValues(), true); // drop data for the
@@ -2348,7 +2345,7 @@
       validateSerDe(crtTbl.getSerName());
       tbl.setSerializationLib(crtTbl.getSerName());
     }
-
+
     if (crtTbl.getFieldDelim() != null) {
       tbl.setSerdeParam(Constants.FIELD_DELIM, crtTbl.getFieldDelim());
       tbl.setSerdeParam(Constants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());