Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 9522)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -163,7 +163,7 @@
     tbl.getPartitionKeys().add(
         new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
     tbl.getPartitionKeys().add(
-        new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+        new FieldSchema("hr", Constants.STRING_TYPE_NAME, ""));
 
     client.createTable(tbl);
 
@@ -1062,9 +1062,9 @@
         "(p1=\"p13\" aNd p2=\"p24\")", 4);
     //test for and or precedence
     checkFilter(client, dbName, tblName,
-        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
     checkFilter(client, dbName, tblName,
-        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
 
     checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
     checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (working copy)
@@ -226,14 +226,18 @@
   public static String makePartName(Map<String, String> spec) throws MetaException {
     StringBuilder suffixBuf = new StringBuilder();
+    int i = 0;
     for (Entry<String, String> e : spec.entrySet()) {
       if (e.getValue() == null || e.getValue().length() == 0) {
         throw new MetaException("Partition spec is incorrect. " + spec);
       }
+      if (i > 0) {
+        suffixBuf.append(Path.SEPARATOR);
+      }
       suffixBuf.append(escapePathName(e.getKey()));
       suffixBuf.append('=');
       suffixBuf.append(escapePathName(e.getValue()));
-      suffixBuf.append(Path.SEPARATOR);
+      i++;
     }
     return suffixBuf.toString();
   }
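With the change above, makePartName only emits the path separator between spec entries, so generated partition names no longer end in a trailing "/". A rough standalone sketch of the new loop, with escaping omitted (the real code routes keys and values through Warehouse.escapePathName):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PartNameSketch {
      // Mirrors the patched loop, minus escaping.
      static String makePartName(Map<String, String> spec) {
        StringBuilder buf = new StringBuilder();
        int i = 0;
        for (Map.Entry<String, String> e : spec.entrySet()) {
          if (i > 0) {
            buf.append('/'); // separator only between entries now
          }
          buf.append(e.getKey()).append('=').append(e.getValue());
          i++;
        }
        return buf.toString();
      }

      public static void main(String[] args) {
        Map<String, String> spec = new LinkedHashMap<String, String>();
        spec.put("ds", "2010-10-01");
        spec.put("hr", "12");
        // Prints ds=2010-10-01/hr=12 (the old loop also appended a trailing "/").
        System.out.println(makePartName(spec));
      }
    }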
Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy)
@@ -106,6 +106,9 @@
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 
+  public abstract List<String> listPartitionNamesByFilter(String db_name,
+      String tbl_name, String filter, short max_parts) throws MetaException;
+
   public abstract void alterPartition(String db_name, String tbl_name,
       Partition new_part) throws InvalidObjectException, MetaException;
 
Index: metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java (working copy)
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hive.metastore.parser;
 
-import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Stack;
 
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde.Constants;
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-
 /**
  * The Class representing the filter as a binary tree. The tree has TreeNode's
  * at intermediate level and the leaf level nodes are of type LeafNode.
@@ -94,7 +94,7 @@
     private TreeNode rhs;
 
     public TreeNode() {
-    }
+    }
 
     public TreeNode(TreeNode lhs, LogicalOperator andOr, TreeNode rhs) {
       this.lhs = lhs;
@@ -173,15 +173,20 @@
             "Value should be on the RHS for LIKE operator : " +
             "Key <" + keyName + ">");
       }
-
-      filter = paramName +
+      else if (operator == Operator.EQUALS) {
+        filter = makeFilterForEquals(keyName, value, paramName, params);
+      } else {
+        filter = paramName +
         " " + operator.getJdoOp() + " " +
         " this.values.get(" + partitionIndex + ")";
+      }
     } else {
-      if( operator == Operator.LIKE ) {
+      if (operator == Operator.LIKE ) {
        //generate this.values.get(i).matches("abc%")
        filter = " this.values.get(" + partitionIndex + ")." +
            operator.getJdoOp() + "(" + paramName + ") ";
+      } else if (operator == Operator.EQUALS) {
+        filter = makeFilterForEquals(keyName, value, paramName, params);
       } else {
        filter = " this.values.get(" + partitionIndex + ") " +
            operator.getJdoOp() + " " + paramName;
@@ -192,6 +197,31 @@
   }
 
   /**
+   * For equals, we can make the JDO query much faster by filtering based on the
+   * partition name. For a condition like ds="2010-10-01", we can see if there
+   * are any partitions with a name that contains the substring "ds=2010-10-01".
+   * False matches aren't possible since "=" is escaped for partition names.
+   *
+   * @param keyName name of the partition column, e.g. ds
+   * @param value the literal value the column is compared against
+   * @param paramName name of the parameter to use for JDOQL
+   * @param params a map from parameter names to their values
+   * @return a JDOQL filter clause on partitionName
+   * @throws MetaException
+   */
+  private static String makeFilterForEquals(String keyName, String value,
+      String paramName, Map<String, String> params) throws MetaException {
+    Map<String, String> partKeyToVal = new HashMap<String, String>();
+    partKeyToVal.put(keyName, value);
+    // If a partition has keys ds and hr, we make the assumption that
+    // makePartName with just ds or hr will return a substring of the name made
+    // with both ds and hr.
+    String escapedNameFragment = Warehouse.makePartName(partKeyToVal);
+    // .* is a wildcard that works for JDOQL
+    params.put(paramName, ".*" + escapedNameFragment + ".*");
+    return "partitionName.matches(" + paramName + ")";
+  }
+
+  /**
    * The root node for the tree.
    */
   private TreeNode root = null;
@@ -250,6 +280,7 @@
       super(input);
     }
 
+    @Override
     public int LA (int i) {
 
       int returnChar = super.LA (i);
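The practical effect of makeFilterForEquals is that an equality leaf no longer compares this.values.get(i); it turns into a regex match against the stored partition name. A rough illustration of the filter and parameter binding the helper builds, using a made-up JDOQL parameter name (the real one is generated by the surrounding LeafNode code, which this hunk does not show):

    import java.util.HashMap;
    import java.util.Map;

    public class EqualsFilterSketch {
      public static void main(String[] args) {
        // Hypothetical parameter name for illustration only.
        String paramName = "hive_filter_param_0";
        Map<String, String> params = new HashMap<String, String>();

        // What makeFilterForEquals effectively produces for ds = "2010-10-01":
        // the partition-name fragment "ds=2010-10-01" wrapped in ".*" wildcards.
        params.put(paramName, ".*" + "ds=2010-10-01" + ".*");
        String jdoFilter = "partitionName.matches(" + paramName + ")";
        System.out.println(jdoFilter + " with " + params);

        // A stored partition named "ds=2010-10-01/hr=12" matches,
        // while "ds=2010-10-02/hr=12" does not.
        System.out.println("ds=2010-10-01/hr=12".matches(params.get(paramName))); // true
        System.out.println("ds=2010-10-02/hr=12".matches(params.get(paramName))); // false
      }
    }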
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -1743,37 +1743,56 @@
     }
 
     @Override
-    public List<Partition> get_partitions_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException,
-        TException {
+    public List<Partition> get_partitions_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partitions_ps");
       logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
-      List<Partition> parts = null;
-      List<Partition> matchingParts = new ArrayList<Partition>();
-      // This gets all the partitions and then filters based on the specified
-      // criteria. An alternative approach would be to get all the partition
-      // names, do the filtering on the names, and get the partition for each
-      // of the names that match.
-
+      Table t;
       try {
-        parts = get_partitions(db_name, tbl_name, (short) -1);
+        t = get_table(db_name, tbl_name);
       } catch (NoSuchObjectException e) {
         throw new MetaException(e.getMessage());
       }
 
-      for (Partition p : parts) {
-        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
-          matchingParts.add(p);
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i = 0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
+        i++;
       }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
 
-      return matchingParts;
+      List<Partition> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+
+      return ret;
     }
 
     @Override
-    public List<String> get_partition_names_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException, TException {
+    public List<String> get_partition_names_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partition_names_ps");
       logStartPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
       Table t;
@@ -1783,23 +1802,37 @@
         throw new MetaException(e.getMessage());
       }
 
-      List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
-      List<String> filteredPartNames = new ArrayList<String>();
-
-      for(String name : partNames) {
-        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
-        List<String> vals = new ArrayList<String>();
-        // Since we are iterating through a LinkedHashMap, iteration should
-        // return the partition values in the correct order for comparison.
-        for (String val : spec.values()) {
-          vals.add(val);
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i = 0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
-        if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
-          filteredPartNames.add(name);
-        }
+        i++;
       }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
 
-      return filteredPartNames;
+      List<String> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<String>>() {
+          @Override
+          List<String> run(RawStore ms) throws Exception {
+            return ms.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+
+      return ret;
     }
 
     @Override
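Rather than fetching every partition and filtering in memory, both methods above now translate the positional part_vals list into a column-to-value map and push a filter string down to the RawStore. A self-contained sketch of that translation for a table partitioned by (ds, hr), assuming, as in the code above, that an empty string marks an unconstrained position:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class PartialSpecSketch {
      public static void main(String[] args) {
        // Assume a table partitioned by (ds, hr), as in the test case above.
        List<String> partKeys = Arrays.asList("ds", "hr");
        // An empty string marks a position the caller does not constrain.
        List<String> partVals = Arrays.asList("", "12");

        // Mirrors the loop in get_partitions_ps / get_partition_names_ps.
        Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
        int i = 0;
        for (String value : partVals) {
          if (value.length() > 0) {
            partKeyToValues.put(partKeys.get(i), value);
          }
          i++;
        }
        // Prints {hr=12}; this map is what gets handed to
        // MetaStoreUtils.makeFilterStringFromMap to form the pushed-down filter.
        System.out.println(partKeyToValues);
      }
    }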
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hive.metastore;
 
-import java.io.ByteArrayInputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -41,7 +40,6 @@
 import org.antlr.runtime.CharStream;
 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.RecognitionException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
@@ -68,10 +66,9 @@
 import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 import org.apache.hadoop.hive.metastore.model.MTable;
 import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -964,6 +961,51 @@
     return parts;
   }
 
+  private String makeQueryFilterString(MTable mtable, String filter,
+      Map<String, String> params)
+      throws MetaException {
+    StringBuilder queryBuilder = new StringBuilder(
+        "table.tableName == t1 && table.database.name == t2");
+
+    if( filter != null ) {
+
+      Table table = convertToTable(mtable);
+
+      CharStream cs = new ANTLRNoCaseStringStream(filter);
+      FilterLexer lexer = new FilterLexer(cs);
+
+      CommonTokenStream tokens = new CommonTokenStream();
+      tokens.setTokenSource (lexer);
+
+      FilterParser parser = new FilterParser(tokens);
+
+      try {
+        parser.filter();
+      } catch(RecognitionException re) {
+        throw new MetaException("Error parsing partition filter : " + re);
+      }
+
+      String jdoFilter = parser.tree.generateJDOFilter(table, params);
+
+      if( jdoFilter.trim().length() > 0 ) {
+        queryBuilder.append(" && ( ");
+        queryBuilder.append(jdoFilter.trim());
+        queryBuilder.append(" )");
+      }
+    }
+
+    return queryBuilder.toString();
+  }
+
+  private String makeParameterDeclarationString(Map<String, String> params) {
+    //Create the parameter declaration string
+    StringBuilder paramDecl = new StringBuilder();
+    for(String key : params.keySet() ) {
+      paramDecl.append(", java.lang.String " + key);
+    }
+    return paramDecl.toString();
+  }
+
   private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException{
     boolean success = false;
@@ -976,80 +1018,103 @@
       MTable mtable = getMTable(dbName, tableName);
 
       if( mtable == null ) {
-        throw new NoSuchObjectException("Specified database/table does not exist : "
+        throw new NoSuchObjectException("Specified database/table does not exist : "
            + dbName + "." + tableName);
       }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      StringBuilder queryBuilder = new StringBuilder(
-          "table.tableName == t1 && table.database.name == t2");
+      Query query = pm.newQuery(MPartition.class,
+          queryFilterString);
 
-      Map<String, String> params = new HashMap<String, String>();
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
 
-      if( filter != null ) {
+      LOG.debug("Filter specified is " + filter + "," +
+          " JDOQL filter is " + queryFilterString);
 
-        Table table = convertToTable(mtable);
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
 
-        CharStream cs = new ANTLRNoCaseStringStream(filter);
-        FilterLexer lexer = new FilterLexer(cs);
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
+      query.setOrdering("partitionName ascending");
 
-        CommonTokenStream tokens = new CommonTokenStream();
-        tokens.setTokenSource (lexer);
+      mparts = (List<MPartition>) query.executeWithMap(params);
 
-        FilterParser parser = new FilterParser(tokens);
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
+  }
 
-        try {
-          parser.filter();
-        } catch(RecognitionException re) {
-          throw new MetaException("Error parsing partition filter : " + re);
-        }
+  @Override
+  public List<String> listPartitionNamesByFilter(String dbName, String tableName,
+      String filter, short maxParts) throws MetaException {
+    boolean success = false;
+    List<String> partNames = new ArrayList<String>();
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitionsByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
 
-        String jdoFilter = parser.tree.generateJDOFilter(table, params);
-
-        if( jdoFilter.trim().length() > 0 ) {
-          queryBuilder.append(" && ( ");
-          queryBuilder.append(jdoFilter.trim());
-          queryBuilder.append(" )");
-        }
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        // To be consistent with the behavior of listPartitionNames, if the
+        // table or db does not exist, we return an empty list
+        return partNames;
       }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      Query query = pm.newQuery(MPartition.class,
-          queryBuilder.toString());
+      Query query = pm.newQuery(
+          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where " + queryFilterString);
 
       if( maxParts >= 0 ) {
         //User specified a row limit, set it on the Query
        query.setRange(0, maxParts);
       }
 
-      //Create the parameter declaration string
-      StringBuilder paramDecl = new StringBuilder(
-          "java.lang.String t1, java.lang.String t2");
-      for(String key : params.keySet() ) {
-        paramDecl.append(", java.lang.String " + key);
-      }
-
       LOG.debug("Filter specified is " + filter + "," +
-          " JDOQL filter is " + queryBuilder.toString());
+          " JDOQL filter is " + queryFilterString);
+      LOG.debug("Params is " + params);
 
       params.put("t1", tableName.trim());
       params.put("t2", dbName.trim());
 
-      query.declareParameters(paramDecl.toString());
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
       query.setOrdering("partitionName ascending");
+      query.setResult("partitionName");
 
-      mparts = (List) query.executeWithMap(params);
+      Collection names = (Collection) query.executeWithMap(params);
+      partNames = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        partNames.add((String) i.next());
+      }
 
-      LOG.debug("Done executing query for listMPartitionsByFilter");
-      pm.retrieveAll(mparts);
+      LOG.debug("Done executing query for listMPartitionNamesByFilter");
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+      LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter");
     } finally {
       if (!success) {
        rollbackTransaction();
       }
     }
-    return mparts;
+    return partNames;
   }
-
   public void alterTable(String dbname, String name, Table newTable)
      throws InvalidObjectException, MetaException {
    boolean success = false;
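Put together, listPartitionNamesByFilter issues a single-column JDOQL query instead of materializing MPartition objects. A rough sketch of the strings it ends up assembling for the filter ds="2010-10-01"; the parameter name hive_filter_param_0 is made up here, since the real names come from the filter parser:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class JdoqlAssemblySketch {
      public static void main(String[] args) {
        // Hypothetical output of makeQueryFilterString for the filter ds="2010-10-01".
        String queryFilterString =
            "table.tableName == t1 && table.database.name == t2"
            + " && ( partitionName.matches(hive_filter_param_0) )";

        Map<String, String> params = new LinkedHashMap<String, String>();
        params.put("hive_filter_param_0", ".*ds=2010-10-01.*");
        params.put("t1", "page_view");   // table name, lower-cased by the caller
        params.put("t2", "default");     // database name

        // Mirrors makeParameterDeclarationString: one ", java.lang.String <key>"
        // fragment per parameter in the map.
        StringBuilder paramDecl = new StringBuilder();
        for (String key : params.keySet()) {
          paramDecl.append(", java.lang.String " + key);
        }

        // listPartitionNamesByFilter then runs roughly:
        //   select partitionName from ...model.MPartition where <queryFilterString>
        // with setResult("partitionName") and setOrdering("partitionName ascending").
        System.out.println(queryFilterString);
        System.out.println(paramDecl);
      }
    }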
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (working copy)
@@ -767,4 +767,23 @@
     return TableType.INDEX_TABLE.toString().equals(table.getTableType());
   }
 
+  /**
+   * Given a map of partition column names to values, this creates a filter
+   * string that can be used to call the *byFilter methods
+   * @param m a map from partition column names to their values
+   * @return the filter string
+   */
+  public static String makeFilterStringFromMap(Map<String, String> m) {
+    StringBuilder filter = new StringBuilder();
+    for (Entry<String, String> e : m.entrySet()) {
+      String col = e.getKey();
+      String val = e.getValue();
+      if (filter.length() == 0) {
+        filter.append(col + "=\"" + val + "\"");
+      } else {
+        filter.append(" and " + col + "=\"" + val + "\"");
+      }
+    }
+    return filter.toString();
+  }
 }
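Assuming the patch is applied, the helper above can be exercised directly; for a two-column spec it produces a conjunctive filter in the same syntax the filter parser accepts (values are quoted verbatim, with no extra escaping):

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;

    public class FilterStringExample {
      public static void main(String[] args) {
        Map<String, String> spec = new LinkedHashMap<String, String>();
        spec.put("ds", "2010-10-01");
        spec.put("hr", "12");
        // Prints: ds="2010-10-01" and hr="12"
        System.out.println(MetaStoreUtils.makeFilterStringFromMap(spec));
      }
    }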
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy)
@@ -31,12 +31,11 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
-import java.util.HashSet;
+import java.util.Collections;
+import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -64,11 +63,22 @@
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.*;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
+import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
+import org.apache.hadoop.hive.ql.metadata.CheckResult;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
@@ -83,22 +93,18 @@
 import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
 import org.apache.hadoop.hive.ql.plan.DropTableDesc;
+import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
-import org.apache.hadoop.hive.ql.plan.LockTableDesc;
-import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
 import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
+import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
-import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
@@ -2177,47 +2183,38 @@
         work.getOutputs().add(new WriteEntity(tbl));
       }
     } else {
-      // get all partitions of the table
-      List<String> partitionNames =
-        db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1);
-      Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
-      for (String partitionName : partitionNames) {
+      String dbName = db.getCurrentDatabase();
+      String tableName = dropTbl.getTableName();
+      // For each partition spec, find all the matching partition names
+      List<String> allMatchingNames = new ArrayList<String>();
+      for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
+        List<String> matchingNames = db.getPartitionNames(dbName,
+            tableName, partSpec, (short) -1);
+        allMatchingNames.addAll(matchingNames);
+        console.printInfo("Adding " + matchingNames);
+      }
+
+      // Using the partition names, retrieve all partitions
+      List<Partition> partitionsToDrop = new ArrayList<Partition>();
+      for (String partName : allMatchingNames) {
+        Map<String, String> spec = null;
         try {
-          partitions.add(Warehouse.makeSpecFromName(partitionName));
+          spec = Warehouse.makeSpecFromName(partName);
         } catch (MetaException e) {
-          LOG.warn("Unrecognized partition name from metastore: " + partitionName);
+          throw new HiveException(e);
         }
-      }
-      // drop partitions in the list
-      List<Partition> partsToDelete = new ArrayList<Partition>();
-      for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
-        Iterator<Map<String, String>> it = partitions.iterator();
-        while (it.hasNext()) {
-          Map<String, String> part = it.next();
-          // test if partSpec matches part
-          boolean match = true;
-          for (Map.Entry<String, String> item : partSpec.entrySet()) {
-            if (!item.getValue().equals(part.get(item.getKey()))) {
-              match = false;
-              break;
-            }
-          }
-          if (match) {
-            Partition p = db.getPartition(tbl, part, false);
-            if (!p.canDrop()) {
-              throw new HiveException("Table " + tbl.getTableName() +
-                  " Partition " + p.getName() +
-                  " is protected from being dropped");
-            }
-            partsToDelete.add(p);
-            it.remove();
-          }
+        Partition p = db.getPartition(tbl, spec, false);
+        if (!p.canDrop()) {
+          throw new HiveException("Table " + tbl.getTableName() +
+              " Partition " + p.getName() +
+              " is protected from being dropped");
         }
+        partitionsToDrop.add(p);
       }
 
       // drop all existing partitions from the list
-      for (Partition partition : partsToDelete) {
+      for (Partition partition : partitionsToDrop) {
        console.printInfo("Dropping the partition " + partition.getName());
        db.dropPartition(db.getCurrentDatabase(), dropTbl.getTableName(),
            partition.getValues(), true); // drop data for the
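The rewritten branch above no longer scans every partition of the table: each DROP spec is sent to the metastore as a partial spec, and only the returned names are expanded back into full specs before the partitions are fetched and checked. A rough stand-in for the name-to-spec step (the real Warehouse.makeSpecFromName also unescapes the stored keys and values):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class DropFlowSketch {
      // Splits a stored partition name back into an ordered column -> value map.
      static Map<String, String> specFromName(String name) {
        Map<String, String> spec = new LinkedHashMap<String, String>();
        for (String kv : name.split("/")) {
          int eq = kv.indexOf('=');
          spec.put(kv.substring(0, eq), kv.substring(eq + 1));
        }
        return spec;
      }

      public static void main(String[] args) {
        // A partial spec such as (ds='2010-10-01') resolves server-side to the
        // matching names; each name is then expanded into a full spec before
        // the partition object is fetched and dropped.
        String matchingName = "ds=2010-10-01/hr=12";
        System.out.println(specFromName(matchingName)); // {ds=2010-10-01, hr=12}
      }
    }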
@@ -2348,7 +2345,7 @@
       validateSerDe(crtTbl.getSerName());
       tbl.setSerializationLib(crtTbl.getSerName());
     }
-
+
     if (crtTbl.getFieldDelim() != null) {
       tbl.setSerdeParam(Constants.FIELD_DELIM, crtTbl.getFieldDelim());
       tbl.setSerdeParam(Constants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());