Index: metastore/ivy.xml
===================================================================
--- metastore/ivy.xml (revision 10754)
+++ metastore/ivy.xml (working copy)
@@ -42,7 +42,7 @@
-    <dependency org="javax.jdo" name="jdo-api" rev="${jdo-api.version}"/>
+    <dependency org="javax.jdo" name="jdo2-api" rev="${jdo-api.version}"/>
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 10754)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -1778,45 +1778,27 @@
throws MetaException, TException {
incrementCounter("get_partitions_ps");
logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
+ List<Partition> parts = null;
+ List<Partition> matchingParts = new ArrayList<Partition>();
- Table t;
+ // This gets all the partitions and then filters based on the specified
+ // criteria. An alternative approach would be to get all the partition
+ // names, do the filtering on the names, and get the partition for each
+ // of the names that match.
+
try {
- t = get_table(db_name, tbl_name);
+ parts = get_partitions(db_name, tbl_name, (short) -1);
} catch (NoSuchObjectException e) {
throw new MetaException(e.getMessage());
}
- if (part_vals.size() > t.getPartitionKeys().size()) {
- throw new MetaException("Incorrect number of partition values");
- }
- // Create a map from the partition column name to the partition value
- Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
- int i=0;
- for (String value : part_vals) {
- String col = t.getPartitionKeys().get(i).getName();
- if (value.length() > 0) {
- partKeyToValues.put(col, value);
+ for (Partition p : parts) {
+ if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
+ matchingParts.add(p);
}
- i++;
}
- final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
- List<Partition> ret = null;
- try {
- ret = executeWithRetry(new Command<List<Partition>>() {
- @Override
- List<Partition> run(RawStore ms) throws Exception {
- return ms.getPartitionsByFilter(db_name, tbl_name, filter, max_parts);
- }
- });
- } catch (MetaException e) {
- throw e;
- } catch (Exception e) {
- assert(e instanceof RuntimeException);
- throw (RuntimeException)e;
- }
-
- return ret;
+ return matchingParts;
}
@Override
@@ -1832,37 +1814,23 @@
throw new MetaException(e.getMessage());
}
- if (part_vals.size() > t.getPartitionKeys().size()) {
- throw new MetaException("Incorrect number of partition values");
- }
- // Create a map from the partition column name to the partition value
- Map<String, String> partKeyToValues = new LinkedHashMap<String, String>();
- int i=0;
- for (String value : part_vals) {
- String col = t.getPartitionKeys().get(i).getName();
- if (value.length() > 0) {
- partKeyToValues.put(col, value);
+ List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
+ List<String> filteredPartNames = new ArrayList<String>();
+
+ for (String name : partNames) {
+ LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
+ List<String> vals = new ArrayList<String>();
+ // Since we are iterating through a LinkedHashMap, iteration should
+ // return the partition values in the correct order for comparison.
+ for (String val : spec.values()) {
+ vals.add(val);
}
- i++;
+ if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
+ filteredPartNames.add(name);
+ }
}
- final String filter = MetaStoreUtils.makeFilterStringFromMap(partKeyToValues);
- List<String> ret = null;
- try {
- ret = executeWithRetry(new Command<List<String>>() {
- @Override
- List<String> run(RawStore ms) throws Exception {
- return ms.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts);
- }
- });
- } catch (MetaException e) {
- throw e;
- } catch (Exception e) {
- assert(e instanceof RuntimeException);
- throw (RuntimeException)e;
- }
-
- return ret;
+ return filteredPartNames;
}
@Override
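
Both rewritten methods delegate partial-specification matching to MetaStoreUtils.pvalMatches(part_vals, ...). The intended semantics mirror the deleted filter-map construction: an empty string in part_vals acts as a wildcard for that partition column, and trailing columns not covered by part_vals are unconstrained. A minimal stand-alone sketch of those semantics (an illustration only; the real helper lives in MetaStoreUtils and may differ in detail):

import java.util.Arrays;
import java.util.List;

public class PvalMatchesSketch {
  // Returns true when every non-empty value in the partial spec equals the
  // corresponding value of the candidate partition; "" is a wildcard.
  static boolean pvalMatches(List<String> partial, List<String> full) {
    if (partial.size() > full.size()) {
      return false; // more criteria than partition columns can never match
    }
    for (int i = 0; i < partial.size(); i++) {
      String p = partial.get(i);
      if (p.length() > 0 && !p.equals(full.get(i))) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // ds pinned to 2011-01-01, hr left as a wildcard
    System.out.println(pvalMatches(Arrays.asList("2011-01-01", ""),
        Arrays.asList("2011-01-01", "12"))); // true
    System.out.println(pvalMatches(Arrays.asList("2011-01-02", ""),
        Arrays.asList("2011-01-01", "12"))); // false
  }
}

Note the trade-off the new in-code comment calls out: get_partitions(db_name, tbl_name, (short) -1) materializes every partition on the server before filtering, which is more expensive than the pushed-down JDOQL filter it replaces.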
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 10754)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy)
@@ -24,8 +24,8 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.Properties;
+import java.util.Map.Entry;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@@ -66,9 +66,9 @@
import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
import org.apache.hadoop.hive.metastore.parser.FilterLexer;
import org.apache.hadoop.hive.metastore.parser.FilterParser;
+import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
import org.apache.hadoop.util.StringUtils;
/**
@@ -1017,53 +1017,8 @@
private List<MPartition> listMPartitionsByFilter(String dbName, String tableName,
String filter, short maxParts) throws MetaException, NoSuchObjectException{
- boolean success = false;
- List<MPartition> mparts = null;
- try {
- openTransaction();
- LOG.debug("Executing listMPartitionsByFilter");
- dbName = dbName.toLowerCase();
- tableName = tableName.toLowerCase();
-
- MTable mtable = getMTable(dbName, tableName);
- if( mtable == null ) {
- throw new NoSuchObjectException("Specified database/table does not exist : "
- + dbName + "." + tableName);
- }
- Map<String, String> params = new HashMap<String, String>();
- String queryFilterString =
- makeQueryFilterString(mtable, filter, params);
-
- Query query = pm.newQuery(MPartition.class,
- queryFilterString);
-
- if( maxParts >= 0 ) {
- //User specified a row limit, set it on the Query
- query.setRange(0, maxParts);
- }
-
- LOG.debug("Filter specified is " + filter + "," +
- " JDOQL filter is " + queryFilterString);
-
- params.put("t1", tableName.trim());
- params.put("t2", dbName.trim());
-
- String parameterDeclaration = makeParameterDeclarationString(params);
- query.declareParameters(parameterDeclaration);
- query.setOrdering("partitionName ascending");
-
- mparts = (List<MPartition>) query.executeWithMap(params);
-
- LOG.debug("Done executing query for listMPartitionsByFilter");
- pm.retrieveAll(mparts);
- success = commitTransaction();
- LOG.debug("Done retrieving all objects for listMPartitionsByFilter");
- } finally {
- if (!success) {
- rollbackTransaction();
- }
- }
- return mparts;
+ throw new RuntimeException("listMPartitionsByFilter is not supported " +
+ "due to a JDO library downgrade");
}
@Override
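
For context on what the downgrade disables: the removed body pushed the parsed filter down to the datastore as a JDOQL query. A condensed sketch of that JDO 2 pattern, using only javax.jdo calls; the parameter declaration is hardcoded here where the original derived it via makeParameterDeclarationString(params), and the wrapper class name is illustrative:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import org.apache.hadoop.hive.metastore.model.MPartition;

class FilterQuerySketch {
  static List<?> listByFilter(PersistenceManager pm, String jdoFilter,
      String dbName, String tableName, short maxParts) {
    Query query = pm.newQuery(MPartition.class, jdoFilter);
    if (maxParts >= 0) {
      query.setRange(0, maxParts); // honor the caller's row limit
    }
    Map<String, String> params = new HashMap<String, String>();
    params.put("t1", tableName.trim());
    params.put("t2", dbName.trim());
    // The original built this declaration string from the params map.
    query.declareParameters("java.lang.String t1, java.lang.String t2");
    query.setOrdering("partitionName ascending");
    return (List<?>) query.executeWithMap(params);
  }
}

With the replacement body, any RawStore call that reaches listMPartitionsByFilter now fails fast with a RuntimeException until the JDO dependency is upgraded again.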
Index: ivy/libraries.properties
===================================================================
--- ivy/libraries.properties (revision 10754)
+++ ivy/libraries.properties (working copy)
@@ -23,9 +23,9 @@
antlr-runtime.version=3.0.1
asm.version=3.1
datanucleus-connectionpool.version=2.0.3
-datanucleus-core.version=2.1.1
-datanucleus-enhancer.version=2.1.0-release
-datanucleus-rdbms.version=2.1.1
+datanucleus-core.version=2.0.3
+datanucleus-enhancer.version=2.0.3
+datanucleus-rdbms.version=2.0.3
checkstyle.version=5.0
commons-cli.version=2.0-SNAPSHOT
commons-codec.version=1.3
@@ -35,7 +35,7 @@
commons-logging.version=1.0.4
commons-logging-api.version=1.0.4
commons-pool.version=1.5.4
-jdo-api.version=3.0.0
+jdo-api.version=2.3-ec
jdom.version=1.1
jline.version=0.9.94
junit.version=3.8.1
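
These properties are substituted by Ivy into the rev="${...}" attributes of the dependency declarations in the various ivy.xml files, so the three datanucleus artifacts and the JDO API jar move in lockstep. Note that 2.3-ec is published under the jdo2-api artifact name rather than jdo-api, which is presumably what the metastore/ivy.xml hunk above accounts for.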
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (revision 10754)
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (working copy)
@@ -416,8 +416,8 @@
e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
e.setPersistenceDelegate(Operator.ProgressCounter.class, new EnumDelegate());
- e.setPersistenceDelegate(org.datanucleus.store.types.sco.backed.Map.class, new MapDelegate());
- e.setPersistenceDelegate(org.datanucleus.store.types.sco.backed.List.class, new ListDelegate());
+ e.setPersistenceDelegate(org.datanucleus.sco.backed.Map.class, new MapDelegate());
+ e.setPersistenceDelegate(org.datanucleus.sco.backed.List.class, new ListDelegate());
e.writeObject(plan);
e.close();
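
Background on this last hunk: Utilities serializes query plans with java.beans.XMLEncoder, and persistence delegates are registered against concrete runtime classes, so the datanucleus package rename (org.datanucleus.store.types.sco.backed.* in 2.1.x versus org.datanucleus.sco.backed.* in 2.0.3) must be mirrored at the registration sites or plan serialization breaks for SCO-backed collections. A self-contained illustration of the delegate mechanism, in the spirit of Hive's EnumDelegate (illustrative only; MapDelegate and ListDelegate are Hive classes not shown here):

import java.beans.DefaultPersistenceDelegate;
import java.beans.Encoder;
import java.beans.Expression;
import java.beans.XMLEncoder;
import java.io.ByteArrayOutputStream;

public class DelegateSketch {
  enum Mode { PARTIAL, FINAL }

  // Reconstruct an enum constant via Enum.valueOf instead of bean properties.
  static class EnumDelegateSketch extends DefaultPersistenceDelegate {
    @Override
    protected Expression instantiate(Object oldInstance, Encoder out) {
      return new Expression(Enum.class, "valueOf", new Object[] {
          oldInstance.getClass(), ((Enum<?>) oldInstance).name() });
    }
  }

  public static void main(String[] args) {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    XMLEncoder e = new XMLEncoder(buf);
    // Delegates are keyed on the concrete class, which is why a library
    // package rename must be reflected at every setPersistenceDelegate site.
    e.setPersistenceDelegate(Mode.class, new EnumDelegateSketch());
    e.writeObject(Mode.FINAL);
    e.close();
    System.out.println(buf.toString());
  }
}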