Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(revision 9522)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(working copy)
@@ -123,6 +123,9 @@
     vals3 = new ArrayList<String>(2);
     vals3.add("2008-07-02 14:13:12");
     vals3.add("15");
+    List<String> vals4 = new ArrayList<String>(2);
+    vals4.add("2008-07-03 14:13:12");
+    vals4.add("151");
 
     client.dropTable(dbName, tblName);
     silentDropDatabase(dbName);
@@ -163,7 +167,7 @@
     tbl.getPartitionKeys().add(
         new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
     tbl.getPartitionKeys().add(
-        new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));
+        new FieldSchema("hr", Constants.STRING_TYPE_NAME, ""));
 
     client.createTable(tbl);
@@ -200,8 +204,17 @@
     part3.setParameters(new HashMap<String, String>());
     part3.setSd(tbl.getSd());
     part3.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
-    part3.getSd().setLocation(tbl.getSd().getLocation() + "/part2");
+    part3.getSd().setLocation(tbl.getSd().getLocation() + "/part3");
 
+    Partition part4 = new Partition();
+    part4.setDbName(dbName);
+    part4.setTableName(tblName);
+    part4.setValues(vals4);
+    part4.setParameters(new HashMap<String, String>());
+    part4.setSd(tbl.getSd());
+    part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+    part4.getSd().setLocation(tbl.getSd().getLocation() + "/part4");
+
     // check if the partition exists (it shouldn't)
     boolean exceptionThrown = false;
     try {
@@ -218,6 +231,8 @@
     assertNotNull("Unable to create partition " + part2, retp2);
     Partition retp3 = client.add_partition(part3);
     assertNotNull("Unable to create partition " + part3, retp3);
+    Partition retp4 = client.add_partition(part4);
+    assertNotNull("Unable to create partition " + part4, retp4);
 
     Partition part_get = client.getPartition(dbName, tblName, part.getValues());
     if(isThriftClient) {
@@ -268,7 +283,7 @@
     partialVals.add(vals2.get(1));
 
     partial = client.listPartitions(dbName, tblName, partialVals,
         (short) -1);
-    assertTrue("Should have returned 2 partitions", partial.size() == 2);
+    assertEquals("Should have returned 2 partitions", 2, partial.size());
     assertTrue("Not all parts returned", partial.containsAll(parts));
 
     partNames.clear();
@@ -276,7 +291,7 @@
     partNames.add(part3Name);
     partialNames = client.listPartitionNames(dbName, tblName, partialVals,
         (short) -1);
-    assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+    assertEquals("Should have returned 2 partition names", 2, partialNames.size());
     assertTrue("Not all part names returned", partialNames.containsAll(partNames));
 
     // Verify escaped partition names don't return partitions
@@ -299,15 +314,15 @@
 
     // Test append_partition_by_name
     client.appendPartition(dbName, tblName, partName);
-    Partition part4 = client.getPartition(dbName, tblName, part.getValues());
-    assertTrue("Append partition by name failed", part4.getValues().equals(vals));;
-    Path part4Path = new Path(part4.getSd().getLocation());
-    assertTrue(fs.exists(part4Path));
+    Partition part5 = client.getPartition(dbName, tblName, part.getValues());
+    assertTrue("Append partition by name failed", part5.getValues().equals(vals));
+    Path part5Path = new Path(part5.getSd().getLocation());
+    assertTrue(fs.exists(part5Path));
 
     // Test drop_partition_by_name
     assertTrue("Drop partition by name failed",
         client.dropPartition(dbName, tblName, partName, true));
-    assertFalse(fs.exists(part4Path));
+    assertFalse(fs.exists(part5Path));
 
     // add the partition again so that drop table with a partition can be
     // tested
@@ -1062,9 +1077,9 @@
         "(p1=\"p13\" aNd p2=\"p24\")", 4);
     //test for and or precedence
     checkFilter(client, dbName, tblName,
-        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
     checkFilter(client, dbName, tblName,
-        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
 
     checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
     checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
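[Review note] For context, the partial-specification lookups these assertions
exercise treat an empty string as a wildcard for that key. A minimal sketch
using the test's own names (illustrative only, not part of the patch):

    // list partitions where hr equals vals2.get(1), with ds unconstrained
    List<String> partialVals = new ArrayList<String>();
    partialVals.add("");            // ds: any value
    partialVals.add(vals2.get(1));  // hr: must match
    List<Partition> partial =
        client.listPartitions(dbName, tblName, partialVals, (short) -1);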
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java	(working copy)
@@ -223,21 +223,40 @@
    * @return string representation of the partition specification.
    * @throws MetaException
    */
-  public static String makePartName(Map<String, String> spec)
+  public static String makePartPath(Map<String, String> spec)
       throws MetaException {
+    return makePartName(spec, true);
+  }
+
+  /**
+   * Makes a partition name from a specification.
+   * @param spec the partition specification
+   * @param addTrailingSeparator if true, adds a trailing separator, e.g. 'ds=1/'
+   * @return the escaped partition name
+   * @throws MetaException
+   */
+  public static String makePartName(Map<String, String> spec,
+      boolean addTrailingSeparator)
+      throws MetaException {
     StringBuilder suffixBuf = new StringBuilder();
+    int i = 0;
     for (Entry<String, String> e : spec.entrySet()) {
       if (e.getValue() == null || e.getValue().length() == 0) {
         throw new MetaException("Partition spec is incorrect. " + spec);
       }
+      if (i > 0) {
+        suffixBuf.append(Path.SEPARATOR);
+      }
       suffixBuf.append(escapePathName(e.getKey()));
       suffixBuf.append('=');
       suffixBuf.append(escapePathName(e.getValue()));
+      i++;
+    }
+    if (addTrailingSeparator) {
       suffixBuf.append(Path.SEPARATOR);
     }
     return suffixBuf.toString();
   }
-
   /**
    * Given a dynamic partition specification, return the path corresponding to the
    * static part of partition specification. This is basically a copy of makePartName
@@ -296,12 +315,12 @@
   public Path getPartitionPath(String dbName, String tableName,
       LinkedHashMap<String, String> pm) throws MetaException {
-    return new Path(getDefaultTablePath(dbName, tableName), makePartName(pm));
+    return new Path(getDefaultTablePath(dbName, tableName), makePartPath(pm));
   }
 
   public Path getPartitionPath(Path tblPath, LinkedHashMap<String, String> pm)
       throws MetaException {
-    return new Path(tblPath, makePartName(pm));
+    return new Path(tblPath, makePartPath(pm));
   }
 
   public boolean isDir(Path f) throws MetaException {
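[Review note] A minimal sketch of the intended behavior of the split
(illustrative values, not part of the patch):

    Map<String, String> spec = new LinkedHashMap<String, String>();
    spec.put("ds", "2008-07-01");
    spec.put("hr", "12");
    Warehouse.makePartPath(spec);         // "ds=2008-07-01/hr=12/" - trailing separator, for paths
    Warehouse.makePartName(spec, false);  // "ds=2008-07-01/hr=12"  - no trailing separator, for names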
Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java	(working copy)
@@ -106,6 +106,9 @@
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 
+  public abstract List<String> listPartitionNamesByFilter(String db_name,
+      String tbl_name, String filter, short max_parts) throws MetaException;
+
   public abstract void alterPartition(String db_name, String tbl_name,
       Partition new_part) throws InvalidObjectException, MetaException;
 
Index: metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java	(working copy)
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hive.metastore.parser;
 
-import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Stack;
 
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CharStream;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde.Constants;
 
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-
 /**
  * The Class representing the filter as a binary tree. The tree has TreeNode's
  * at intermediate level and the leaf level nodes are of type LeafNode.
@@ -94,7 +94,7 @@
   private TreeNode rhs;
 
   public TreeNode() {
-  } 
+  }
 
   public TreeNode(TreeNode lhs, LogicalOperator andOr, TreeNode rhs) {
     this.lhs = lhs;
@@ -140,22 +140,25 @@
     @Override
     public String generateJDOFilter(Table table, Map<String, String> params)
         throws MetaException {
-      int partitionIndex;
-      for(partitionIndex = 0;
-          partitionIndex < table.getPartitionKeys().size();
-          partitionIndex++ ) {
-        if( table.getPartitionKeys().get(partitionIndex).getName().
+
+      int partitionColumnCount = table.getPartitionKeys().size();
+      int partitionColumnIndex;
+      for(partitionColumnIndex = 0;
+          partitionColumnIndex < partitionColumnCount;
+          partitionColumnIndex++ ) {
+        if( table.getPartitionKeys().get(partitionColumnIndex).getName().
            equalsIgnoreCase(keyName)) {
          break;
        }
      }
+      assert (table.getPartitionKeys().size() > 0);
 
-      if( partitionIndex == table.getPartitionKeys().size() ) {
+      if( partitionColumnIndex == table.getPartitionKeys().size() ) {
        throw new MetaException("Specified key <" + keyName +
            "> is not a partitioning key for the table");
      }
 
-      if( ! table.getPartitionKeys().get(partitionIndex).
+      if( ! table.getPartitionKeys().get(partitionColumnIndex).
          getType().equals(Constants.STRING_TYPE_NAME) ) {
        throw new MetaException
        ("Filtering is supported only on partition keys of type string");
@@ -173,17 +176,24 @@
              "Value should be on the RHS for LIKE operator : " +
              "Key <" + keyName + ">");
        }
-
-        filter = paramName +
+        else if (operator == Operator.EQUALS) {
+          filter = makeFilterForEquals(keyName, value, paramName, params,
+              partitionColumnIndex, partitionColumnCount);
+        } else {
+          filter = paramName +
            " " + operator.getJdoOp() + " " +
-            " this.values.get(" + partitionIndex + ")";
+            " this.values.get(" + partitionColumnIndex + ")";
+        }
      } else {
-        if( operator == Operator.LIKE ) {
+        if (operator == Operator.LIKE ) {
          //generate this.values.get(i).matches("abc%")
-          filter = " this.values.get(" + partitionIndex + ")." +
+          filter = " this.values.get(" + partitionColumnIndex + ")." +
              operator.getJdoOp() + "(" + paramName + ") ";
+        } else if (operator == Operator.EQUALS) {
+          filter = makeFilterForEquals(keyName, value, paramName, params,
+              partitionColumnIndex, partitionColumnCount);
        } else {
-          filter = " this.values.get(" + partitionIndex + ") " +
+          filter = " this.values.get(" + partitionColumnIndex + ") " +
              operator.getJdoOp() + " " + paramName;
        }
      }
@@ -192,6 +202,46 @@
   }
 
   /**
+   * For EQUALS, we can make the JDO query much faster by filtering based on the
+   * partition name. For a condition like ds="2010-10-01", we can check whether
+   * there are any partitions with a name that contains the substring
+   * "ds=2010-10-01/". False matches aren't possible, since "=" is escaped in
+   * partition names and the trailing '/' ensures that we won't get a match
+   * with ds=2010-10-011.
+   *
+   * Two cases to keep in mind: the case with only one partition column (no
+   * '/'s in the name), and the case where the partition key column is at the
+   * end of the name (no trailing '/').
+   *
+   * @param keyName name of the partition column, e.g. ds
+   * @param value the value of the partition column
+   * @param paramName name of the parameter to use for JDOQL
+   * @param params a map from the parameter name to its value
+   * @param keyPos position of the partition column in the table's key list
+   * @param keyCount total number of partition columns
+   * @return the JDOQL filter clause
+   * @throws MetaException
+   */
+  private static String makeFilterForEquals(String keyName, String value,
+      String paramName, Map<String, String> params, int keyPos, int keyCount)
+      throws MetaException {
+    Map<String, String> partKeyToVal = new HashMap<String, String>();
+    partKeyToVal.put(keyName, value);
+    // If a partition has multiple partition keys, we make the assumption that
+    // makePartName with one key will return a substring of the name made
+    // with all the keys.
+    String escapedNameFragment = Warehouse.makePartName(partKeyToVal, false);
+
+    if (keyCount == 1) {
+      // Case where there are no other partition columns
+      params.put(paramName, escapedNameFragment);
+    } else if (keyPos + 1 == keyCount) {
+      // Case where the partition column is at the end of the name. There will
+      // be a leading '/' but no trailing '/'
+      params.put(paramName, ".*/" + escapedNameFragment);
+    } else {
+      params.put(paramName, ".*" + escapedNameFragment + "/.*");
+    }
+    return "partitionName.matches(" + paramName + ")";
+  }
+
+  /**
    * The root node for the tree.
    */
   private TreeNode root = null;
@@ -250,6 +300,7 @@
       super(input);
     }
 
+    @Override
     public int LA (int i) {
       int returnChar = super.LA (i);
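[Review note] To make the name-based shortcut concrete, here is what
makeFilterForEquals produces for a table partitioned by (ds, hr)
(illustrative values; paramName is generated by the caller):

    // ds = "2010-10-01", keyPos 0 of keyCount 2 (not the last key):
    //   params.put(paramName, ".*ds=2010-10-01/.*")
    // hr = "12", keyPos 1 of keyCount 2 (last key, no trailing '/'):
    //   params.put(paramName, ".*/hr=12")
    // single partition column ds (keyCount == 1, exact name):
    //   params.put(paramName, "ds=2010-10-01")
    // in all three cases the returned clause is:
    //   partitionName.matches(paramName)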
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -1743,37 +1743,56 @@
     }
 
     @Override
-    public List<Partition> get_partitions_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException,
-        TException {
+    public List<Partition> get_partitions_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partitions_ps");
       logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
-      List<Partition> parts = null;
-      List<Partition> matchingParts = new ArrayList<Partition>();
 
-      // This gets all the partitions and then filters based on the specified
-      // criteria. An alternative approach would be to get all the partition
-      // names, do the filtering on the names, and get the partition for each
-      // of the names that match.
-
+      Table t;
       try {
-        parts = get_partitions(db_name, tbl_name, (short) -1);
+        t = get_table(db_name, tbl_name);
       } catch (NoSuchObjectException e) {
         throw new MetaException(e.getMessage());
       }
 
-      for (Partition p : parts) {
-        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
-          matchingParts.add(p);
-        }
-      }
-
-      return matchingParts;
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i = 0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
+        }
+        i++;
+      }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
+
+      List<Partition> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+
+      return ret;
     }
 
     @Override
-    public List<String> get_partition_names_ps(String db_name, String tbl_name,
-        List<String> part_vals, short max_parts) throws MetaException, TException {
+    public List<String> get_partition_names_ps(final String db_name,
+        final String tbl_name, final List<String> part_vals, final short max_parts)
+        throws MetaException, TException {
       incrementCounter("get_partition_names_ps");
       logStartPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
       Table t;
@@ -1783,23 +1802,37 @@
         throw new MetaException(e.getMessage());
       }
 
-      List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
-      List<String> filteredPartNames = new ArrayList<String>();
-
-      for(String name : partNames) {
-        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
-        List<String> vals = new ArrayList<String>();
-        // Since we are iterating through a LinkedHashMap, iteration should
-        // return the partition values in the correct order for comparison.
-        for (String val : spec.values()) {
-          vals.add(val);
-        }
-        if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
-          filteredPartNames.add(name);
-        }
-      }
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i = 0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
+        }
+        i++;
+      }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
 
-      return filteredPartNames;
+      List<String> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<String>>() {
+          @Override
+          List<String> run(RawStore ms) throws Exception {
+            return ms.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+
+      return ret;
     }
 
     @Override
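[Review note] An illustrative example of the translation both methods now
perform (values made up): for a table partitioned by (ds, hr) and
part_vals = ["2008-07-01", ""], the empty value is skipped, so the map is
{ds=2008-07-01} and the pushed-down filter string is

    ds="2008-07-01"

which the RawStore evaluates in the database, instead of the old approach of
fetching every partition (or every name) and filtering client-side.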
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.metastore;
 
-import java.io.ByteArrayInputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -41,7 +40,6 @@
 import org.antlr.runtime.CharStream;
 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.RecognitionException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
@@ -68,10 +66,9 @@
 import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 import org.apache.hadoop.hive.metastore.model.MTable;
 import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.hive.metastore.parser.FilterLexer;
 import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -964,6 +961,51 @@
     return parts;
   }
 
+  private String makeQueryFilterString(MTable mtable, String filter,
+      Map<String, String> params)
+      throws MetaException {
+    StringBuilder queryBuilder = new StringBuilder(
+        "table.tableName == t1 && table.database.name == t2");
+
+    if (filter != null && filter.length() > 0) {
+      Table table = convertToTable(mtable);
+
+      CharStream cs = new ANTLRNoCaseStringStream(filter);
+      FilterLexer lexer = new FilterLexer(cs);
+
+      CommonTokenStream tokens = new CommonTokenStream();
+      tokens.setTokenSource(lexer);
+
+      FilterParser parser = new FilterParser(tokens);
+
+      try {
+        parser.filter();
+      } catch(RecognitionException re) {
+        throw new MetaException("Error parsing partition filter : " + re);
+      }
+
+      String jdoFilter = parser.tree.generateJDOFilter(table, params);
+
+      if (jdoFilter.trim().length() > 0) {
+        queryBuilder.append(" && ( ");
+        queryBuilder.append(jdoFilter.trim());
+        queryBuilder.append(" )");
+      }
+    }
+
+    return queryBuilder.toString();
+  }
+
+  private String makeParameterDeclarationString(Map<String, String> params) {
+    // Create the parameter declaration string
+    StringBuilder paramDecl = new StringBuilder();
+    for (String key : params.keySet()) {
+      paramDecl.append(", java.lang.String " + key);
+    }
+    return paramDecl.toString();
+  }
+
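[Review note] A sketch of the JDOQL these two helpers assemble for the filter
ds="2010-10-01" (illustrative; the filter parameter name, shown here as p0,
is generated while building the ExpressionTree):

    // makeQueryFilterString(mtable, filter, params) returns roughly:
    //   table.tableName == t1 && table.database.name == t2
    //     && ( partitionName.matches(p0) )
    // makeParameterDeclarationString(params) then emits one
    // ", java.lang.String <key>" fragment per entry in params
    // (t1, t2 and p0), which is passed to query.declareParameters().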
   private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException {
     boolean success = false;
@@ -976,80 +1018,103 @@
       MTable mtable = getMTable(dbName, tableName);
       if( mtable == null ) {
-        throw new NoSuchObjectException("Specified database/table does not exist : "
+        throw new NoSuchObjectException("Specified database/table does not exist : "
             + dbName + "." + tableName);
       }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      StringBuilder queryBuilder = new StringBuilder(
-          "table.tableName == t1 && table.database.name == t2");
+      Query query = pm.newQuery(MPartition.class,
+          queryFilterString);
 
-      Map<String, String> params = new HashMap<String, String>();
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
 
-      if( filter != null ) {
+      LOG.debug("Filter specified is " + filter + "," +
+          " JDOQL filter is " + queryFilterString);
 
-        Table table = convertToTable(mtable);
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
 
-        CharStream cs = new ANTLRNoCaseStringStream(filter);
-        FilterLexer lexer = new FilterLexer(cs);
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
+      query.setOrdering("partitionName ascending");
 
-        CommonTokenStream tokens = new CommonTokenStream();
-        tokens.setTokenSource (lexer);
+      mparts = (List<MPartition>) query.executeWithMap(params);
 
-        FilterParser parser = new FilterParser(tokens);
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
+  }
 
-        try {
-          parser.filter();
-        } catch(RecognitionException re) {
-          throw new MetaException("Error parsing partition filter : " + re);
-        }
+  @Override
+  public List<String> listPartitionNamesByFilter(String dbName, String tableName,
+      String filter, short maxParts) throws MetaException {
+    boolean success = false;
+    List<String> partNames = new ArrayList<String>();
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitionNamesByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
 
-        String jdoFilter = parser.tree.generateJDOFilter(table, params);
-
-        if( jdoFilter.trim().length() > 0 ) {
-          queryBuilder.append(" && ( ");
-          queryBuilder.append(jdoFilter.trim());
-          queryBuilder.append(" )");
-        }
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        // To be consistent with the behavior of listPartitionNames, if the
+        // table or db does not exist, we return an empty list
+        return partNames;
       }
+      Map<String, String> params = new HashMap<String, String>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
 
-      Query query = pm.newQuery(MPartition.class,
-          queryBuilder.toString());
+      Query query = pm.newQuery(
+          "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where " + queryFilterString);
 
       if( maxParts >= 0 ) {
         //User specified a row limit, set it on the Query
         query.setRange(0, maxParts);
       }
 
-      //Create the parameter declaration string
-      StringBuilder paramDecl = new StringBuilder(
-          "java.lang.String t1, java.lang.String t2");
-      for(String key : params.keySet() ) {
-        paramDecl.append(", java.lang.String " + key);
-      }
-
       LOG.debug("Filter specified is " + filter + "," +
-          " JDOQL filter is " + queryBuilder.toString());
+          " JDOQL filter is " + queryFilterString);
+      LOG.debug("Params is " + params);
 
       params.put("t1", tableName.trim());
       params.put("t2", dbName.trim());
 
-      query.declareParameters(paramDecl.toString());
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
       query.setOrdering("partitionName ascending");
+      query.setResult("partitionName");
 
-      mparts = (List<MPartition>) query.executeWithMap(params);
+      Collection names = (Collection) query.executeWithMap(params);
+      partNames = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        partNames.add((String) i.next());
+      }
 
-      LOG.debug("Done executing query for listMPartitionsByFilter");
-      pm.retrieveAll(mparts);
+      LOG.debug("Done executing query for listMPartitionNamesByFilter");
       success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+      LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter");
     } finally {
       if (!success) {
         rollbackTransaction();
       }
     }
-    return mparts;
+    return partNames;
   }
-
   public void alterTable(String dbname, String name, Table newTable)
       throws InvalidObjectException, MetaException {
     boolean success = false;
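[Review note] Illustrative usage of the new method (database, table and filter
values are made up). Because of query.setResult("partitionName"), the query
projects the name strings directly and never instantiates MPartition objects:

    List<String> names = objectStore.listPartitionNamesByFilter(
        "default", "page_view", "ds=\"2008-07-01\"", (short) -1);
    // e.g. ["ds=2008-07-01/hr=11", "ds=2008-07-01/hr=12"]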
Index: metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(revision 9522)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java	(working copy)
@@ -767,4 +767,23 @@
     return TableType.INDEX_TABLE.toString().equals(table.getTableType());
   }
 
+  /**
+   * Given a map of partition column names to values, this creates a filter
+   * string that can be used to call the *byFilter methods.
+   * @param m a map from partition column name to value
+   * @return the filter string
+   */
+  public static String makeFilterStringFromMap(Map<String, String> m) {
+    StringBuilder filter = new StringBuilder();
+    for (Entry<String, String> e : m.entrySet()) {
+      String col = e.getKey();
+      String val = e.getValue();
+      if (filter.length() == 0) {
+        filter.append(col + "=\"" + val + "\"");
+      } else {
+        filter.append(" and " + col + "=\"" + val + "\"");
+      }
+    }
+    return filter.toString();
+  }
 }
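[Review note] Example input/output for makeFilterStringFromMap (illustrative
values; the values are wrapped in double quotes to match the filter grammar
used by the *byFilter methods):

    // {ds=2008-07-01, hr=12}  ->  ds="2008-07-01" and hr="12"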
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java	(working copy)
@@ -42,7 +42,7 @@
   /**
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
-   * 
+   *
    * @param dbName
    *          name of the database, if not specified the default will be used.
    * @param tableName
@@ -95,7 +95,7 @@
   /**
    * Check for table directories that aren't in the metastore.
-   * 
+   *
    * @param dbName
    *          Name of the database
   * @param tables
@@ -144,7 +144,7 @@
   /**
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
-   * 
+   *
    * @param dbName
    *          Name of the database
    * @param tableName
@@ -189,7 +189,7 @@
       if (part == null) {
         PartitionResult pr = new PartitionResult();
         pr.setTableName(tableName);
-        pr.setPartitionName(Warehouse.makePartName(map));
+        pr.setPartitionName(Warehouse.makePartPath(map));
         result.getPartitionsNotInMs().add(pr);
       } else {
         parts.add(part);
@@ -204,7 +204,7 @@
   /**
    * Check the metastore for inconsistencies, data missing in either the
    * metastore or on the dfs.
-   * 
+   *
    * @param table
    *          Table to check
    * @param parts
@@ -259,7 +259,7 @@
   /**
    * Find partitions on the fs that are unknown to the metastore.
-   * 
+   *
    * @param table
    *          Table where the partitions would be located
    * @param partPaths
@@ -301,7 +301,7 @@
   /**
    * Get the partition name from the path.
-   * 
+   *
    * @param tablePath
    *          Path of the table.
    * @param partitionPath
@@ -326,9 +326,9 @@
   /**
    * Recursive method to get the leaf directories of a base path. Example:
    * base/dir1/dir2 base/dir3
-   * 
+   *
    * This will return dir2 and dir3 but not dir1.
-   * 
+   *
    * @param basePath
    *          Start directory
    * @param allDirs
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java	(working copy)
@@ -423,7 +423,7 @@
   public String toString() {
     String pn = "Invalid Partition";
     try {
-      pn = Warehouse.makePartName(getSpec());
+      pn = Warehouse.makePartName(getSpec(), false);
     } catch (MetaException e) {
       // ignore as we most probably in an exception path already otherwise this
       // error wouldn't occur
Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java	(working copy)
@@ -916,7 +916,7 @@
       // the table's location (even if the table is marked external)
       fs = FileSystem.get(tbl.getDataLocation(), getConf());
       partPath = new Path(tbl.getDataLocation().getPath(),
-          Warehouse.makePartName(partSpec));
+          Warehouse.makePartPath(partSpec));
     } else {
       // Partition exists already. Get the path from the partition. This will
       // get the default path for Hive created partitions or the external path
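[Review note] The remaining caller updates below all follow the same rule
(example values made up): keep the trailing separator when building a
filesystem path, drop it when building a display name.

    Warehouse.makePartPath(spec)          // "ds=2008-07-01/hr=12/" - paths under a table location
    Warehouse.makePartName(spec, false)   // "ds=2008-07-01/hr=12"  - toString(), error messages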
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java	(working copy)
@@ -299,7 +299,7 @@
         PartitionStatistics newPartStats = new PartitionStatistics();
 
         // In that case of a partition, the key for stats temporary store is "rootDir/[dynamic_partition_specs/]%"
-        String partitionID = work.getAggKey() + Warehouse.makePartName(partn.getSpec());
+        String partitionID = work.getAggKey() + Warehouse.makePartPath(partn.getSpec());
         String rows = statsAggregator.aggregateStats(partitionID, StatsSetupConst.ROW_COUNT);
         if (rows != null) {
Index: ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/index/compact/IndexMetadataChangeTask.java	(working copy)
@@ -38,7 +38,7 @@
 
   @Override
   protected int execute(DriverContext driverContext) {
-    
+
     try {
       Hive db = Hive.get(conf);
       IndexMetadataChangeWork work = this.getWork();
@@ -58,19 +58,20 @@
         console.printError("Index table is partitioned, but no partition specified.");
         return 1;
       }
-      
+
       if (work.getPartSpec() != null) {
         Partition part = db.getPartition(tbl, work.getPartSpec(), false);
         if (part == null) {
-          console.printError("Partition " + Warehouse.makePartName(work.getPartSpec()).toString()
+          console.printError("Partition " +
+              Warehouse.makePartName(work.getPartSpec(), false).toString()
              + " does not exist.");
          return 1;
        }
-        
+
        Path url = new Path(part.getDataLocation().toString());
        FileSystem fs = url.getFileSystem(conf);
        FileStatus fstat = fs.getFileStatus(url);
-        
+
        part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
        db.alterPartition(tbl.getTableName(), part);
      } else {
@@ -93,7 +94,7 @@
   public String getName() {
     return "IndexMetadataChangeTask";
   }
-  
+
   @Override
   public int getType() {
     return StageType.DDL;
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java	(working copy)
@@ -3631,7 +3631,7 @@
 
     if (dest_part != null) {
       try {
-        String staticSpec = Warehouse.makePartName(dest_part.getSpec());
+        String staticSpec = Warehouse.makePartPath(dest_part.getSpec());
         fileSinkDesc.setStaticSpec(staticSpec);
       } catch (MetaException e) {
         throw new SemanticException(e);
@@ -7142,7 +7142,7 @@
     for (ExecDriver mrtask: mrtasks) {
       try {
         ContentSummary inputSummary = Utilities.getInputSummary
-            (ctx, (MapredWork)mrtask.getWork(), p);
+            (ctx, mrtask.getWork(), p);
         int numReducers = getNumberOfReducers(mrtask.getWork(), conf);
 
         if (LOG.isDebugEnabled()) {
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java	(revision 9522)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java	(working copy)
@@ -448,7 +448,8 @@
         Partition part = db.getPartition(baseTbl, partSpec, false);
         if (part == null) {
           throw new HiveException("Partition "
-              + Warehouse.makePartName(partSpec) + " does not exist in table "
+              + Warehouse.makePartName(partSpec, false)
+              + " does not exist in table "
              + baseTbl.getTableName());
        }
        baseTblPartitions.add(part);