Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (revision 1135227)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (working copy)
@@ -27,6 +27,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -277,17 +278,20 @@
   }
 
   /**
-   * Makes a partition name from a specification
-   * @param spec
-   * @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/'
-   * @return
+   * A common function for constructing a partition name
+   * @param spec the partition spec
+   * @param addTrailingSeparator whether to add a trailing separator at the end
+   * @param isSorted whether the partition name should be sorted by key
+   * @return a partition name
    * @throws MetaException
    */
-  public static String makePartName(Map<String, String> spec,
-      boolean addTrailingSeperator)
-      throws MetaException {
+  private static String makePartNameCommon(Map<String, String> spec,
+      boolean addTrailingSeparator, boolean isSorted) throws MetaException {
     StringBuilder suffixBuf = new StringBuilder();
     int i = 0;
+    if (isSorted) {
+      spec = new TreeMap<String, String>(spec);
+    }
     for (Entry<String, String> e : spec.entrySet()) {
       if (e.getValue() == null || e.getValue().length() == 0) {
         throw new MetaException("Partition spec is incorrect. " + spec);
@@ -300,12 +304,40 @@
       suffixBuf.append(escapePathName(e.getValue()));
       i++;
     }
-    if (addTrailingSeperator) {
+    if (addTrailingSeparator) {
       suffixBuf.append(Path.SEPARATOR);
     }
     return suffixBuf.toString();
   }
 
+  /**
+   * Makes a partition name from a specification
+   * @param spec
+   * @param addTrailingSeperator if true, adds a trailing separator, e.g. 'ds=1/'
+   * @return
+   * @throws MetaException
+   */
+  public static String makePartName(Map<String, String> spec,
+      boolean addTrailingSeperator)
+      throws MetaException {
+    return makePartNameCommon(spec, addTrailingSeperator, false);
+  }
+
+  /**
+   * Makes a partition name from a specification. The key/value pairs in the partition
+   * name are sorted by key name, e.g. created=4/ds=1/hr=3. Sorting the keys is useful
+   * for comparing partition names.
+   * @param spec the partition spec
+   * @param addTrailingSeparator whether to add a trailing separator at the end of the name
+   * @return a partition name in which the keys are sorted
+   * @throws MetaException
+   */
+  public static String makeSortedPartName(Map<String, String> spec,
+      boolean addTrailingSeparator) throws MetaException {
+    return makePartNameCommon(spec, addTrailingSeparator, true);
+  }
+
+  /**
   * Given a dynamic partition specification, return the path corresponding to the
   * static part of partition specification. This is basically a copy of makePartName
   * but we get rid of MetaException since it is not serializable.
@@ -407,5 +439,4 @@
     values.addAll(partSpec.values());
     return values;
   }
-
 }
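For context, the point of the new makeSortedPartName is that two specs holding the same key/value pairs produce the same name regardless of the iteration order of the source map, so partition names can be compared directly. Below is a minimal usage sketch, separate from the patch itself; it assumes only the Warehouse methods shown in this diff, and the class name is made up for illustration.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class SortedPartNameExample {
  public static void main(String[] args) throws MetaException {
    // Two specs with the same key/value pairs but different insertion order.
    Map<String, String> a = new LinkedHashMap<String, String>();
    a.put("hr", "3");
    a.put("ds", "1");

    Map<String, String> b = new LinkedHashMap<String, String>();
    b.put("ds", "1");
    b.put("hr", "3");

    // Unsorted names follow map iteration order: hr=3/ds=1 vs. ds=1/hr=3
    System.out.println(Warehouse.makePartName(a, false));
    System.out.println(Warehouse.makePartName(b, false));

    // Sorted names are identical for both specs: ds=1/hr=3
    System.out.println(Warehouse.makeSortedPartName(a, false));
    System.out.println(Warehouse.makeSortedPartName(b, false));
  }
}

Wrapping the spec in a TreeMap inside makePartNameCommon is what provides the ordering; the name-building loop itself is unchanged.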
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1135227)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -34,14 +34,14 @@
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Date;
-import java.util.HashSet;
+import java.util.Hashtable;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -97,6 +97,7 @@
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
+import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
@@ -129,7 +130,6 @@
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
-import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 import org.apache.hadoop.hive.serde.Constants;
@@ -162,6 +162,7 @@
 
   private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
   private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
 
+  @Override
   public boolean requireLock() {
     return this.work != null && this.work.getNeedLock();
   }
@@ -2926,13 +2927,16 @@
           dropTbl.getExpectView());
     }
 
-    // get all partitions of the table
+    // get all partition names of the table
     List<String> partitionNames =
       db.getPartitionNames(dropTbl.getTableName(), (short) -1);
-    Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
+    // Build a hashtable from the "sorted partition name" to the original partition spec.
+    // We need sorted partition names so that partitions can be compared by name.
+    Hashtable<String, Map<String, String>> partitions = new Hashtable<String, Map<String, String>>();
     for (String partitionName : partitionNames) {
       try {
-        partitions.add(Warehouse.makeSpecFromName(partitionName));
+        Map<String, String> spec = Warehouse.makeSpecFromName(partitionName);
+        partitions.put(Warehouse.makeSortedPartName(spec, false), spec);
       } catch (MetaException e) {
         LOG.warn("Unrecognized partition name from metastore: " + partitionName);
       }
@@ -2940,28 +2944,26 @@
 
     // drop partitions in the list
     List<Partition> partsToDelete = new ArrayList<Partition>();
     for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
-      Iterator<Map<String, String>> it = partitions.iterator();
-      while (it.hasNext()) {
-        Map<String, String> part = it.next();
-        // test if partSpec matches part
-        boolean match = true;
-        for (Map.Entry<String, String> item : partSpec.entrySet()) {
-          if (!item.getValue().equals(part.get(item.getKey()))) {
-            match = false;
-            break;
-          }
+      String partSpecStr;
+      try {
+        partSpecStr = Warehouse.makeSortedPartName(partSpec, false);
+      } catch (MetaException e) {
+        // If the partition spec to remove is empty or null, just move on gracefully
+        LOG.warn(e.getMessage());
+        continue;
+      }
+      Map<String, String> origPart = null;
+      // If the partition we wish to delete exists in our hashtable of all partition names,
+      // get the partition and add it to our list of partitions to delete
+      if ((origPart = partitions.get(partSpecStr)) != null) {
+        Partition p = db.getPartition(tbl, origPart, false);
+        if (!p.canDrop()) {
+          throw new HiveException("Table " + tbl.getTableName() +
+              " Partition " + p.getName() +
+              " is protected from being dropped");
         }
-        if (match) {
-          Partition p = db.getPartition(tbl, part, false);
-          if (!p.canDrop()) {
-            throw new HiveException("Table " + tbl.getTableName() +
-                " Partition " + p.getName() +
-                " is protected from being dropped");
-          }
-
-          partsToDelete.add(p);
-          it.remove();
-        }
+        partsToDelete.add(p);
+        partitions.remove(partSpecStr);
       }
     }
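For context, the DDLTask change replaces the nested spec-matching loops with a single lookup keyed by the sorted partition name. The following self-contained sketch, separate from the patch itself, mirrors that lookup strategy; the class name and sample data are made up, and the metastore calls (db.getPartitionNames, db.getPartition) are stood in by literals.

import java.util.Arrays;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class DropPartitionLookupSketch {
  public static void main(String[] args) throws MetaException {
    // Stand-in for db.getPartitionNames(dropTbl.getTableName(), (short) -1)
    List<String> partitionNames = Arrays.asList("ds=1/hr=3", "ds=1/hr=4");

    // Key every existing partition by its sorted name.
    Hashtable<String, Map<String, String>> partitions =
        new Hashtable<String, Map<String, String>>();
    for (String name : partitionNames) {
      Map<String, String> spec = Warehouse.makeSpecFromName(name);
      partitions.put(Warehouse.makeSortedPartName(spec, false), spec);
    }

    // Stand-in for one entry of dropTbl.getPartSpecs(): same pairs, different order.
    Map<String, String> toDrop = Warehouse.makeSpecFromName("hr=3/ds=1");
    Map<String, String> origPart =
        partitions.get(Warehouse.makeSortedPartName(toDrop, false));
    System.out.println(origPart != null ? "would drop " + origPart : "no match");
  }
}

Because the key is the full sorted name, each drop spec resolves to at most one existing partition, and removing the matched entry from the hashtable afterwards keeps a partition from being collected twice.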