Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(revision 1058396)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java	(working copy)
@@ -990,10 +990,170 @@
    * @throws Exception
    */
   public void testPartitionFilter() throws Exception {
-    // Tests listMPartitionsByFilter() introduced by HIVE-1609. Temporarily
-    // disabled until issues identified by HIVE-1853 are resolved.
+    String dbName = "filterdb";
+    String tblName = "filtertbl";
+
+    List<String> vals = new ArrayList<String>(3);
+    vals.add("p11");
+    vals.add("p21");
+    vals.add("p31");
+    List<String> vals2 = new ArrayList<String>(3);
+    vals2.add("p11");
+    vals2.add("p22");
+    vals2.add("p31");
+    List<String> vals3 = new ArrayList<String>(3);
+    vals3.add("p12");
+    vals3.add("p21");
+    vals3.add("p31");
+    List<String> vals4 = new ArrayList<String>(3);
+    vals4.add("p12");
+    vals4.add("p23");
+    vals4.add("p31");
+    List<String> vals5 = new ArrayList<String>(3);
+    vals5.add("p13");
+    vals5.add("p24");
+    vals5.add("p31");
+    List<String> vals6 = new ArrayList<String>(3);
+    vals6.add("p13");
+    vals6.add("p25");
+    vals6.add("p31");
+
+    silentDropDatabase(dbName);
+
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+
+    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+    cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
+    cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));
+
+    ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
+    partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
+    partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
+    partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, ""));
+
+    Table tbl = new Table();
+    tbl.setDbName(dbName);
+    tbl.setTableName(tblName);
+    StorageDescriptor sd = new StorageDescriptor();
+    tbl.setSd(sd);
+    sd.setCols(cols);
+    sd.setCompressed(false);
+    sd.setNumBuckets(1);
+    sd.setParameters(new HashMap<String, String>());
+    sd.setBucketCols(new ArrayList<String>());
+    sd.setSerdeInfo(new SerDeInfo());
+    sd.getSerdeInfo().setName(tbl.getTableName());
+    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+    sd.getSerdeInfo().getParameters()
+        .put(Constants.SERIALIZATION_FORMAT, "1");
+    sd.setSortCols(new ArrayList<Order>());
+
+    tbl.setPartitionKeys(partCols);
+    client.createTable(tbl);
+
+    tbl = client.getTable(dbName, tblName);
+
+    add_partition(client, tbl, vals, "part1");
+    add_partition(client, tbl, vals2, "part2");
+    add_partition(client, tbl, vals3, "part3");
+    add_partition(client, tbl, vals4, "part4");
+    add_partition(client, tbl, vals5, "part5");
+    add_partition(client, tbl, vals6, "part6");
+
+    checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+    checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+    checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+
+    checkFilter(client, dbName, tblName,
+        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+    checkFilter(client, dbName, tblName,
+        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+        "(p1=\"p13\" aNd p2=\"p24\")", 4);
+    //test for and or precedence
+    checkFilter(client, dbName, tblName,
+        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+    checkFilter(client, dbName, tblName,
+        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+
+    checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+    checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+
+    //Test for setting the maximum partition count
+    List<Partition> partitions = client.listPartitionsByFilter(dbName,
+        tblName, "p1 >= \"p12\"", (short) 2);
+    assertEquals("User specified row limit for partitions",
+        2, partitions.size());
+
+    //Negative tests
+    Exception me = null;
+    try {
+      client.listPartitionsByFilter(dbName,
+          tblName, "p3 >= \"p12\"", (short) -1);
+    } catch(MetaException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("Filter on int partition key", me.getMessage().contains(
+        "Filtering is supported only on partition keys of type string"));
+
+    me = null;
+    try {
+      client.listPartitionsByFilter(dbName,
+          tblName, "c1 >= \"p12\"", (short) -1);
+    } catch(MetaException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("Filter on invalid key", me.getMessage().contains(
+        " is not a partitioning key for the table"));
+
+    me = null;
+    try {
+      client.listPartitionsByFilter(dbName,
+          tblName, "c1 >= ", (short) -1);
+    } catch(MetaException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("Invalid filter string", me.getMessage().contains(
+        "Error parsing partition filter"));
+
+    me = null;
+    try {
+      client.listPartitionsByFilter("invDBName",
+          "invTableName", "p1 = \"p11\"", (short) -1);
+    } catch(NoSuchObjectException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("NoSuchObject exception", me.getMessage().contains(
+        "database/table does not exist"));
+
+    client.dropTable(dbName, tblName);
+    client.dropDatabase(dbName);
   }
 
+  private void checkFilter(HiveMetaStoreClient client, String dbName,
+      String tblName, String filter, int expectedCount)
+      throws MetaException, NoSuchObjectException, TException {
+    List<Partition> partitions = client.listPartitionsByFilter(dbName,
+        tblName, filter, (short) -1);
+
+    assertEquals("Partition count expected for filter " + filter,
+        expectedCount, partitions.size());
+  }
+
   private void add_partition(HiveMetaStoreClient client, Table table,
       List<String> vals, String location) throws InvalidObjectException,
       AlreadyExistsException, MetaException, TException {
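[Review note] The and/or precedence assertions are the subtle part of this test: the filter grammar binds "and" tighter than "Or", so p1="p12" and p2="p27" Or p2="p21" parses as (p1="p12" and p2="p27") or p2="p21" and matches the two partitions whose p2 is p21. A plain-Java rehearsal of that evaluation over the six value lists created above (a standalone sketch, no metastore involved; the class name is illustrative):

    import java.util.Arrays;
    import java.util.List;

    public class PrecedenceCheck {
      public static void main(String[] args) {
        // The six (p1, p2, p3) value lists added by testPartitionFilter.
        List<List<String>> parts = Arrays.asList(
            Arrays.asList("p11", "p21", "p31"),
            Arrays.asList("p11", "p22", "p31"),
            Arrays.asList("p12", "p21", "p31"),
            Arrays.asList("p12", "p23", "p31"),
            Arrays.asList("p13", "p24", "p31"),
            Arrays.asList("p13", "p25", "p31"));

        int count = 0;
        for (List<String> p : parts) {
          // "p1=p12 and p2=p27 Or p2=p21", with "and" binding tighter:
          boolean matches = (p.get(0).equals("p12") && p.get(1).equals("p27"))
              || p.get(1).equals("p21");
          if (matches) {
            count++;
          }
        }
        System.out.println(count); // prints 2, the count asserted above
      }
    }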
Index: metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java	(revision 1058396)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java	(working copy)
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.serde.Constants;
+import org.apache.hadoop.hive.common.FileUtils;
 
 /**
  * The Class representing the filter as a binary tree. The tree has TreeNode's
@@ -168,6 +169,17 @@
       params.put(paramName, value);
 
       String filter;
+      String keyEqual = FileUtils.escapePathName(keyName) + "=";
+      int keyEqualLength = keyEqual.length();
+      String valString;
+      // partitionname ==> (key=value/)*(key=value)
+      if (partitionColumnCount == 1) {
+        valString = "partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ")";
+      }
+      else {
+        valString = "partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ").substring(0, partitionName.substring(partitionName.indexOf(\"" + keyEqual + "\")+" + keyEqualLength + ").indexOf(\"/\"))";
+      }
+
       //Handle "a > 10" and "10 > a" appropriately
       if (isReverseOrder){
         //For LIKE, the value should be on the RHS
@@ -181,19 +193,18 @@
               partitionColumnIndex, partitionColumnCount);
         } else {
           filter = paramName +
-            " " + operator.getJdoOp() + " " +
-            " this.values.get(" + partitionColumnIndex + ")";
+            " " + operator.getJdoOp() + " " + valString;
         }
       } else {
         if (operator == Operator.LIKE ) {
           //generate this.values.get(i).matches("abc%")
-          filter = " this.values.get(" + partitionColumnIndex + ")."
+          filter = " " + valString + "."
              + operator.getJdoOp() + "(" + paramName + ") ";
         } else if (operator == Operator.EQUALS) {
           filter = makeFilterForEquals(keyName, value, paramName, params,
              partitionColumnIndex, partitionColumnCount);
         } else {
-          filter = " this.values.get(" + partitionColumnIndex + ") "
+          filter = " " + valString + " "
              + operator.getJdoOp() + " " + paramName;
         }
       }
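[Review note] The substance of this hunk: the generated JDOQL no longer calls this.values.get(i), which broke under the downgraded JDO library, and instead carves the value out of partitionName, whose shape is the (key=value/)*(key=value) form noted in the comment. A plain-Java rehearsal of the same substring arithmetic (a sketch that skips the FileUtils.escapePathName step the real code applies to the key; the method name and sample partition name are illustrative):

    public class PartitionNameExtract {
      // Mirrors the generated filter: take everything after "<key>=", then,
      // unless the table has a single partition column, cut at the next "/".
      static String valueOf(String partitionName, String keyName,
          int partitionColumnCount) {
        String keyEqual = keyName + "=";
        String tail = partitionName.substring(
            partitionName.indexOf(keyEqual) + keyEqual.length());
        if (partitionColumnCount == 1) {
          return tail; // single column: no trailing "/key=value" segments
        }
        return tail.substring(0, tail.indexOf('/'));
      }

      public static void main(String[] args) {
        String name = "p1=p12/p2=p21/p3=p31"; // illustrative partition name
        System.out.println(valueOf(name, "p1", 3)); // p12
        System.out.println(valueOf(name, "p2", 3)); // p21
      }
    }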
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 1058396)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -1892,27 +1892,45 @@
       incrementCounter("get_partitions_ps");
       logStartPartitionFunction("get_partitions_ps", db_name, tbl_name,
           part_vals);
-      List<Partition> parts = null;
-      List<Partition> matchingParts = new ArrayList<Partition>();
-      // This gets all the partitions and then filters based on the specified
-      // criteria. An alternative approach would be to get all the partition
-      // names, do the filtering on the names, and get the partition for each
-      // of the names that match.
-
+      Table t;
       try {
-        parts = get_partitions(db_name, tbl_name, (short) -1);
+        t = get_table(db_name, tbl_name);
       } catch (NoSuchObjectException e) {
         throw new MetaException(e.getMessage());
       }
 
-      for (Partition p : parts) {
-        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
-          matchingParts.add(p);
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i=0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
+        i++;
       }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
 
-      return matchingParts;
+      List<Partition> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+
+      return ret;
     }
 
     @Override
@@ -1928,23 +1946,37 @@
         throw new MetaException(e.getMessage());
       }
 
-      List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
-      List<String> filteredPartNames = new ArrayList<String>();
-
-      for(String name : partNames) {
-        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
-        List<String> vals = new ArrayList<String>();
-        // Since we are iterating through a LinkedHashMap, iteration should
-        // return the partition values in the correct order for comparison.
-        for (String val : spec.values()) {
-          vals.add(val);
+      if (part_vals.size() > t.getPartitionKeys().size()) {
+        throw new MetaException("Incorrect number of partition values");
+      }
+      // Create a map from the partition column name to the partition value
+      Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
+      int i=0;
+      for (String value : part_vals) {
+        String col = t.getPartitionKeys().get(i).getName();
+        if (value.length() > 0) {
+          partKeyToValues.put(col, value);
         }
-        if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
-          filteredPartNames.add(name);
-        }
+        i++;
       }
+      final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
 
-      return filteredPartNames;
+      List<String> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<String>>() {
+          @Override
+          List<String> run(RawStore ms) throws Exception {
+            return ms.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        assert(e instanceof RuntimeException);
+        throw (RuntimeException)e;
+      }
+
+      return ret;
     }
 
     @Override
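[Review note] Both get_partitions_ps and get_partition_names_ps now translate a partial partition specification into a filter string and push it down to the store, rather than fetching everything and matching in the server. The rendering itself is delegated to MetaStoreUtils.makeFilterStringFromMap, which this hunk does not show; the sketch below uses a hypothetical stand-in that ANDs together key = "value" terms, the shape the ExpressionTree parser accepts:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class PartialSpecToFilter {
      // Hypothetical stand-in for MetaStoreUtils.makeFilterStringFromMap.
      static String makeFilterStringFromMap(Map<String, String> keyToVal) {
        StringBuilder filter = new StringBuilder();
        for (Map.Entry<String, String> e : keyToVal.entrySet()) {
          if (filter.length() > 0) {
            filter.append(" and ");
          }
          filter.append(e.getKey()).append(" = \"").append(e.getValue()).append("\"");
        }
        return filter.toString();
      }

      public static void main(String[] args) {
        List<String> partKeys = Arrays.asList("p1", "p2", "p3");
        // A partial spec: p2 is empty, meaning "any value".
        List<String> partVals = Arrays.asList("p11", "", "p31");

        // Same map-building loop as the patched methods: empty values
        // are skipped and therefore put no constraint on the filter.
        Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
        int i = 0;
        for (String value : partVals) {
          if (value.length() > 0) {
            partKeyToValues.put(partKeys.get(i), value);
          }
          i++;
        }
        System.out.println(makeFilterStringFromMap(partKeyToValues));
        // -> p1 = "p11" and p3 = "p31"
      }
    }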
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 1058396)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -1262,8 +1262,53 @@
   private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException {
-    throw new RuntimeException("listMPartitionsByFilter is not supported " +
-        "due to a JDO library downgrade");
+    boolean success = false;
+    List<MPartition> mparts = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listMPartitionsByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        throw new NoSuchObjectException("Specified database/table does not exist : "
+            + dbName + "." + tableName);
+      }
+      Map<String, Object> params = new HashMap<String, Object>();
+      String queryFilterString =
+        makeQueryFilterString(mtable, filter, params);
+
+      Query query = pm.newQuery(MPartition.class,
+          queryFilterString);
+
+      if( maxParts >= 0 ) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
+
+      LOG.debug("Filter specified is " + filter + "," +
+          " JDOQL filter is " + queryFilterString);
+
+      params.put("t1", tableName.trim());
+      params.put("t2", dbName.trim());
+
+      String parameterDeclaration = makeParameterDeclarationString(params);
+      query.declareParameters(parameterDeclaration);
+      query.setOrdering("partitionName ascending");
+
+      mparts = (List<MPartition>) query.executeWithMap(params);
+
+      LOG.debug("Done executing query for listMPartitionsByFilter");
+      pm.retrieveAll(mparts);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mparts;
   }
 
   @Override
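[Review note] With listMPartitionsByFilter restored, HiveMetaStoreClient.listPartitionsByFilter works end to end again, including the setRange-backed row limit. A usage sketch against a running metastore (the connection setup and the filterdb/filtertbl table are assumptions borrowed from the test above, not something this patch provides):

    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class FilterClientSketch {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());

        // Filters may only touch string partition keys; comparison, like,
        // and and/or all go through the JDOQL pushdown in ObjectStore.
        List<Partition> parts = client.listPartitionsByFilter(
            "filterdb", "filtertbl", "p1 = \"p11\" or p2 = \"p23\"", (short) -1);

        // A non-negative max becomes query.setRange(0, max), so the limit
        // is enforced by the datastore, not by client-side trimming.
        List<Partition> firstTwo = client.listPartitionsByFilter(
            "filterdb", "filtertbl", "p1 >= \"p12\"", (short) 2);

        System.out.println(parts.size() + " " + firstTwo.size());
        client.close();
      }
    }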