diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 4f58cd91efc..f95872092a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -1543,9 +1543,10 @@ private String generateJDOFilter(org.apache.hadoop.hive.metastore.api.Table tabl
   private String generateJDOFilter(org.apache.hadoop.hive.metastore.api.Table table, ExpressionTree exprTree)
       throws MetaException {
+    assert table != null;
     ExpressionTree.FilterBuilder filterBuilder = new ExpressionTree.FilterBuilder(true);
     Map<String, Object> params = new HashMap<>();
-    exprTree.generateJDOFilterFragment(conf, table, params, filterBuilder);
+    exprTree.generateJDOFilterFragment(conf, params, filterBuilder, table.getPartitionKeys());
     StringBuilder stringBuilder = new StringBuilder(filterBuilder.getFilter());
     // replace leading &&
     stringBuilder.replace(0, 4, "");
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index d1558876f14..b69277e5a95 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -43,7 +43,6 @@
 import javax.jdo.Transaction;
 import javax.jdo.datastore.JDOConnection;
 
-import com.google.common.collect.ImmutableMap;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats;
@@ -93,13 +92,14 @@
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hive.common.util.BloomFilter;
 import org.datanucleus.store.rdbms.query.ForwardQueryResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 
 /**
@@ -530,13 +530,10 @@ public Database getDatabase(String catName, String dbName) throws MetaException{
    * @param max The maximum number of partitions to return.
    * @return List of partitions.
    */
-  public List<Partition> getPartitionsViaSqlFilter(
+  public List<Partition> getPartitionsViaSqlFilter(String catName, String dbName, String tableName,
       SqlFilterForPushdown filter, Integer max) throws MetaException {
-    Boolean isViewTable = isViewTable(filter.table);
-    String catName = filter.table.isSetCatName() ? filter.table.getCatName() :
-        DEFAULT_CATALOG_NAME;
     List<Long> partitionIds = getPartitionIdsViaSqlFilter(catName,
-        filter.table.getDbName(), filter.table.getTableName(), filter.filter, filter.params,
+        dbName, tableName, filter.filter, filter.params,
         filter.joins, max);
     if (partitionIds.isEmpty()) {
       return Collections.emptyList(); // no partitions, bail early.
@@ -544,8 +541,8 @@ public Database getDatabase(String catName, String dbName) throws MetaException{
     return Batchable.runBatched(batchSize, partitionIds, new Batchable<Long, Partition>() {
       @Override
       public List<Partition> run(List<Long> input) throws MetaException {
-        return getPartitionsFromPartitionIds(catName, filter.table.getDbName(),
-            filter.table.getTableName(), isViewTable, input, Collections.emptyList());
+        return getPartitionsFromPartitionIds(catName, dbName,
+            tableName, null, input, Collections.emptyList());
       }
     });
   }
@@ -647,22 +644,24 @@ public Database getDatabase(String catName, String dbName) throws MetaException{
     private final List<Object> params = new ArrayList<>();
     private final List<String> joins = new ArrayList<>();
     private String filter;
-    private Table table;
+    private String catName;
+    private String dbName;
+    private String tableName;
   }
 
-  public boolean generateSqlFilterForPushdown(
-      Table table, ExpressionTree tree, SqlFilterForPushdown result) throws MetaException {
-    return generateSqlFilterForPushdown(table, tree, null, result);
-  }
-
-  public boolean generateSqlFilterForPushdown(Table table, ExpressionTree tree, String defaultPartitionName,
-      SqlFilterForPushdown result) throws MetaException {
+  public boolean generateSqlFilterForPushdown(String catName, String dbName, String tableName,
+      List<FieldSchema> partitionKeys, ExpressionTree tree, String defaultPartitionName,
+      SqlFilterForPushdown result) throws MetaException {
     // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround.
+    assert partitionKeys != null;
     boolean dbHasJoinCastBug = DatabaseProduct.hasJoinOperationOrderBug(dbType);
-    result.table = table;
-    result.filter = PartitionFilterGenerator.generateSqlFilter(table, tree, result.params,
-        result.joins, dbHasJoinCastBug, ((defaultPartitionName == null) ? defaultPartName : defaultPartitionName),
-        dbType, schema);
+    result.tableName = tableName;
+    result.dbName = dbName;
+    result.catName = catName;
+    result.filter = PartitionFilterGenerator.generateSqlFilter(catName, dbName, tableName,
+        partitionKeys, tree, result.params, result.joins, dbHasJoinCastBug,
+        ((defaultPartitionName == null) ? defaultPartName : defaultPartitionName),
+        dbType, schema);
     return result.filter != null;
   }
@@ -824,16 +823,6 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw
       Long sdId = MetastoreDirectSqlUtils.extractSqlLong(fields[1]);
       Long colId = MetastoreDirectSqlUtils.extractSqlLong(fields[2]);
       Long serdeId = MetastoreDirectSqlUtils.extractSqlLong(fields[3]);
-      // A partition must have at least sdId and serdeId set, or nothing set if it's a view.
-      if (sdId == null || serdeId == null) {
-        if (isView == null) {
-          isView = isViewTable(catName, dbName, tblName);
-        }
-        if ((sdId != null || colId != null || serdeId != null) || !isView) {
-          throw new MetaException("Unexpected null for one of the IDs, SD " + sdId +
-              ", serde " + serdeId + " for a " + (isView ? "" : "non-") + " view");
-        }
-      }
 
       Partition part = new Partition();
       orderedResult.add(part);
@@ -959,9 +948,9 @@ private boolean isViewTable(String catName, String dbName, String tblName) throw
   public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws MetaException {
     boolean doTrace = LOG.isDebugEnabled();
-    String catName = filter.table.getCatName().toLowerCase();
-    String dbName = filter.table.getDbName().toLowerCase();
-    String tblName = filter.table.getTableName().toLowerCase();
+    String catName = filter.catName.toLowerCase();
+    String dbName = filter.dbName.toLowerCase();
+    String tblName = filter.tableName.toLowerCase();
 
     // Get number of partitions by doing count on PART_ID.
     String queryText = "select count(" + PARTITIONS + ".\"PART_ID\") from " + PARTITIONS + ""
@@ -998,7 +987,10 @@ private static String trimCommaList(StringBuilder sb) {
   }
 
   private static class PartitionFilterGenerator extends TreeVisitor {
-    private final Table table;
+    private String catName;
+    private String dbName;
+    private String tableName;
+    private List<FieldSchema> partitionKeys;
     private final FilterBuilder filterBuffer;
     private final List<Object> params;
     private final List<String> joins;
@@ -1007,9 +999,13 @@ private static String trimCommaList(StringBuilder sb) {
     private final DatabaseProduct dbType;
     private final String PARTITION_KEY_VALS, PARTITIONS, DBS, TBLS;
 
-    private PartitionFilterGenerator(Table table, List<Object> params, List<String> joins,
+    private PartitionFilterGenerator(String catName, String dbName, String tableName,
+        List<FieldSchema> partitionKeys, List<Object> params, List<String> joins,
         boolean dbHasJoinCastBug, String defaultPartName, DatabaseProduct dbType, String schema) {
-      this.table = table;
+      this.catName = catName;
+      this.dbName = dbName;
+      this.tableName = tableName;
+      this.partitionKeys = partitionKeys;
       this.params = params;
       this.joins = joins;
       this.dbHasJoinCastBug = dbHasJoinCastBug;
@@ -1024,15 +1020,18 @@ private PartitionFilterGenerator(Table table, List params, List
 
     /**
      * Generate the ANSI SQL92 filter for the given expression tree
-     * @param table the table being queried
+     * @param catName catalog name
+     * @param dbName db name
+     * @param tableName table name
+     * @param partitionKeys partition keys
     * @param params the ordered parameters for the resulting expression
     * @param joins the joins necessary for the resulting expression
     * @return the string representation of the expression tree
     */
-    private static String generateSqlFilter(Table table, ExpressionTree tree, List<Object> params,
+    private static String generateSqlFilter(String catName, String dbName, String tableName,
+        List<FieldSchema> partitionKeys, ExpressionTree tree, List<Object> params,
        List<String> joins, boolean dbHasJoinCastBug, String defaultPartName,
        DatabaseProduct dbType, String schema) throws MetaException {
-      assert table != null;
      if (tree == null) {
        // consistent with other APIs like makeExpressionTree, null is returned to indicate that
        // the filter could not pushed down due to parsing issue etc
@@ -1042,7 +1041,8 @@ private static String generateSqlFilter(Table table, ExpressionTree tree, List
+    List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
     result.addAll(new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
       @Override
       protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
         // If we have some sort of expression tree, try SQL filter pushdown.
         if (exprTree != null) {
           SqlFilterForPushdown filter = new SqlFilterForPushdown();
-          if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, defaultPartitionName, filter)) {
-            return directSql.getPartitionsViaSqlFilter(filter, null);
+          if (directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys,
+              exprTree, defaultPartitionName, filter)) {
+            String catalogName = (catName != null) ? catName : DEFAULT_CATALOG_NAME;
+            return directSql.getPartitionsViaSqlFilter(catalogName, dbName, tblName, filter, null);
           }
         }
         // We couldn't do SQL filter pushdown. Get names via normal means.
         List<String> partNames = new LinkedList<>();
         hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
-            ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
+            catName, dbName, tblName, partitionKeys, expr, defaultPartitionName, maxParts, partNames));
         return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames);
       }
 
@@ -3355,18 +3495,18 @@ protected boolean getPartitionsByExprInternal(String catName, String dbName, Str
         // If we have some sort of expression tree, try JDOQL filter pushdown.
         List<Partition> result = null;
         if (exprTree != null) {
-          result = getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false);
+          result = getPartitionsViaOrmFilter(catName, dbName, tblName, exprTree, maxParts, false, partitionKeys);
         }
         if (result == null) {
           // We couldn't do JDOQL filter pushdown. Get names via normal means.
           List<String> partNames = new ArrayList<>();
           hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
-              ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
+              catName, dbName, tblName, partitionKeys, expr, defaultPartitionName, maxParts, partNames));
           result = getPartitionsViaOrmFilter(catName, dbName, tblName, partNames);
         }
         return result;
       }
-    }.run(true));
+    }.run(false));
 
     return hasUnknownPartitions.get();
   }
@@ -3383,19 +3523,27 @@ private String getDefaultPartitionName(String inputDefaultPartName) {
   /**
    * Gets the partition names from a table, pruned using an expression.
-   * @param table Table.
+   * @param catName
+   * @param dbName
+   * @param tblName
    * @param expr Expression.
    * @param defaultPartName Default partition name from job config, if any.
    * @param maxParts Maximum number of partition names to return.
    * @param result The resulting names.
    * @return Whether the result contains any unknown partitions.
    */
-  private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
-      String defaultPartName, short maxParts, List<String> result) throws MetaException {
-    result.addAll(getPartitionNamesNoTxn(table.getCatName(),
-        table.getDbName(), table.getTableName(), maxParts));
-    return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr,
-        getDefaultPartitionName(defaultPartName), result);
+  private boolean getPartitionNamesPrunedByExprNoTxn(String catName, String dbName, String tblName, List<FieldSchema> partColumns, byte[] expr,
+      String defaultPartName, short maxParts, List<String> result) throws MetaException {
+    result.addAll(getPartitionNamesNoTxn(
+        catName,
+        dbName,
+        tblName,
+        maxParts));
+    return expressionProxy.filterPartitionsByExpr(
+        partColumns,
+        expr,
+        getDefaultPartitionName(defaultPartName),
+        result);
   }
 
   /**
@@ -3408,11 +3556,11 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
    * @return Resulting partitions. Can be null if isValidatedFilter is false, and
    *         there was error deriving the JDO filter.
    */
-  private List<Partition> getPartitionsViaOrmFilter(Table table, ExpressionTree tree,
-      short maxParts, boolean isValidatedFilter) throws MetaException {
+  private List<Partition> getPartitionsViaOrmFilter(String catName, String dbName, String tblName, ExpressionTree tree,
+      short maxParts, boolean isValidatedFilter, List<FieldSchema> partitionKeys) throws MetaException {
     Map<String, Object> params = new HashMap<>();
     String jdoFilter =
-        makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, params, isValidatedFilter);
+        makeQueryFilterString(catName, dbName, tblName, tree, params, isValidatedFilter, partitionKeys);
     if (jdoFilter == null) {
       assert !isValidatedFilter;
       return null;
@@ -3434,11 +3582,11 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
     return results;
   }
 
-  private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter)
-      throws MetaException {
+  private Integer getNumPartitionsViaOrmFilter(String catName, String dbName, String tblName, ExpressionTree tree, boolean isValidatedFilter, List<FieldSchema> partitionKeys)
+      throws MetaException {
     Map<String, Object> params = new HashMap<>();
-    String jdoFilter = makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree,
-        params, isValidatedFilter);
+    String jdoFilter = makeQueryFilterString(catName, dbName, tblName, tree,
+        params, isValidatedFilter, partitionKeys);
     if (jdoFilter == null) {
       assert !isValidatedFilter;
       return null;
@@ -3826,6 +3974,12 @@ public int getNumPartitionsByFilter(String catName, String dbName, String tblNam
     final ExpressionTree exprTree = org.apache.commons.lang3.StringUtils.isNotEmpty(filter)
         ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
 
+    catName = normalizeIdentifier(catName);
+    dbName = normalizeIdentifier(dbName);
+    tblName = normalizeIdentifier(tblName);
+    MTable mTable = ensureGetMTable(catName, dbName, tblName);
+    List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
+
     return new GetHelper<Integer>(catName, dbName, tblName, true, true) {
       private final SqlFilterForPushdown filter = new SqlFilterForPushdown();
 
@@ -3836,7 +3990,7 @@ protected String describeResult() {
 
       @Override
       protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws MetaException {
-        return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter);
+        return directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, exprTree, null, filter);
       }
 
       @Override
@@ -3846,9 +4000,9 @@ protected Integer getSqlResult(GetHelper ctx) throws MetaException {
       @Override
       protected Integer getJdoResult(
          GetHelper<Integer> ctx) throws MetaException, NoSuchObjectException {
-        return getNumPartitionsViaOrmFilter(ctx.getTable(), exprTree, true);
+        return getNumPartitionsViaOrmFilter(catName, dbName, tblName, exprTree, true, partitionKeys);
       }
-    }.run(true);
+    }.run(false);
   }
 
   @Override
@@ -3857,6 +4011,11 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
     final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr, null);
     final byte[] tempExpr = expr; // Need to be final to pass it to an inner class
+    catName = normalizeIdentifier(catName);
+    dbName = normalizeIdentifier(dbName);
+    tblName = normalizeIdentifier(tblName);
+    MTable mTable = ensureGetMTable(catName, dbName, tblName);
+    List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
 
     return new GetHelper<Integer>(catName, dbName, tblName, true, true) {
       private final SqlFilterForPushdown filter = new SqlFilterForPushdown();
 
@@ -3868,7 +4027,7 @@ protected String describeResult() {
 
       @Override
       protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws MetaException {
-        return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter);
+        return directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, exprTree, null, filter);
       }
 
       @Override
@@ -3882,7 +4041,7 @@ protected Integer getJdoResult(
         if (exprTree != null) {
           try {
-            numPartitions = getNumPartitionsViaOrmFilter(ctx.getTable(), exprTree, true);
+            numPartitions = getNumPartitionsViaOrmFilter(catName, dbName, tblName, exprTree, true, partitionKeys);
           } catch (MetaException e) {
            numPartitions = null;
          }
@@ -3891,19 +4050,26 @@ protected Integer getJdoResult(
         // if numPartitions could not be obtained from ORM filters, then get number partitions names, and count them
         if (numPartitions == null) {
           List<String> filteredPartNames = new ArrayList<>();
-          getPartitionNamesPrunedByExprNoTxn(ctx.getTable(), tempExpr, "", (short) -1, filteredPartNames);
+          getPartitionNamesPrunedByExprNoTxn(catName, dbName, tblName, partitionKeys, tempExpr, "", (short) -1, filteredPartNames);
           numPartitions = filteredPartNames.size();
         }
         return numPartitions;
       }
-    }.run(true);
+    }.run(false);
   }
 
   protected List<Partition> getPartitionsByFilterInternal(
       String catName, String dbName, String tblName, String filter, final short maxParts,
       boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException {
+
+    catName = normalizeIdentifier(catName);
+    dbName = normalizeIdentifier(dbName);
+    tblName = normalizeIdentifier(tblName);
+
+    MTable mTable = ensureGetMTable(catName, dbName, tblName);
+    List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
     final ExpressionTree tree = (filter != null && !filter.isEmpty())
         ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
 
     return new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
@@ -3911,20 +4077,20 @@ protected Integer getJdoResult(
 
       @Override
       protected boolean canUseDirectSql(GetHelper<List<Partition>> ctx) throws MetaException {
-        return directSql.generateSqlFilterForPushdown(ctx.getTable(), tree, filter);
+        return directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys, tree, null, filter);
       }
 
       @Override
       protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
-        return directSql.getPartitionsViaSqlFilter(filter, (maxParts < 0) ? null : (int)maxParts);
+        return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, filter, (maxParts < 0) ? null : (int)maxParts);
       }
 
       @Override
       protected List<Partition> getJdoResult(
          GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
-        return getPartitionsViaOrmFilter(ctx.getTable(), tree, maxParts, true);
+        return getPartitionsViaOrmFilter(catName, dbName, tblName, tree, maxParts, true, partitionKeys);
       }
-    }.run(true);
+    }.run(false);
   }
 
   @Override
@@ -3964,7 +4130,8 @@ protected boolean canUseDirectSql(GetHelper> ctx) throws MetaExc
           // if the filter mode is BY_EXPR initialize the filter and generate the expression tree
          // if there are more than one filter string we AND them together
          initExpressionTree();
-          return directSql.generateSqlFilterForPushdown(ctx.getTable(), tree, filter);
+          return directSql.generateSqlFilterForPushdown(table.getCatName(), table.getDbName(), table.getTableName(),
+              table.getPartitionKeys(), tree, null, filter);
         }
         // BY_VALUES and BY_NAMES are always supported
         return true;
@@ -4108,10 +4275,30 @@ private String makeQueryFilterString(String catName, String dbName, Table table,
       params.put("catName", catName);
     }
-    tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder);
+    tree.generateJDOFilterFragment(getConf(), params, queryBuilder, table != null ? table.getPartitionKeys() : null);
+    if (queryBuilder.hasError()) {
+      assert !isValidatedFilter;
+      LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage());
+      return null;
+    }
+    String jdoFilter = queryBuilder.getFilter();
+    LOG.debug("jdoFilter = {}", jdoFilter);
+    return jdoFilter;
+  }
+
+  private String makeQueryFilterString(String catName, String dbName, String tblName,
+      ExpressionTree tree, Map<String, Object> params,
+      boolean isValidatedFilter, List<FieldSchema> partitionKeys) throws MetaException {
+    assert tree != null;
+    FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter);
+    queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3");
+    params.put("t1", tblName);
+    params.put("t2", dbName);
+    params.put("t3", catName);
+    tree.generateJDOFilterFragment(getConf(), params, queryBuilder, partitionKeys);
     if (queryBuilder.hasError()) {
       assert !isValidatedFilter;
-      LOG.info("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage());
+      LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage());
       return null;
     }
     String jdoFilter = queryBuilder.getFilter();
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
index 9834883f00f..f4f5e1dd509 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
@@ -19,6 +19,7 @@
 import java.util.Date;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
@@ -28,14 +29,14 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.ColumnType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Sets;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 
 /**
  * The Class representing the filter as a binary tree. The tree has TreeNode's
@@ -222,23 +223,18 @@ protected void accept(TreeVisitor visitor) throws MetaException {
 
     /**
      * Generates a JDO filter statement
-     * @param table
-     *          The table on which the filter is applied. If table is not null,
-     *          then this method generates a JDO statement to get all partitions
-     *          of the table that match the filter.
-     *          If table is null, then this method generates a JDO statement to get all
-     *          tables that match the filter.
     * @param params
     *          A map of parameter key to values for the filter statement.
     * @param filterBuffer The filter builder that is used to build filter.
+     * @param partitionKeys
     * @throws MetaException
     */
-    public void generateJDOFilter(Configuration conf, Table table,
-        Map<String, Object> params, FilterBuilder filterBuffer) throws MetaException {
+    public void generateJDOFilter(Configuration conf,
+        Map<String, Object> params, FilterBuilder filterBuffer, List<FieldSchema> partitionKeys) throws MetaException {
      if (filterBuffer.hasError()) return;
      if (lhs != null) {
        filterBuffer.append (" (");
-        lhs.generateJDOFilter(conf, table, params, filterBuffer);
+        lhs.generateJDOFilter(conf, params, filterBuffer, partitionKeys);
 
        if (rhs != null) {
          if( andOr == LogicalOperator.AND ) {
@@ -247,7 +243,7 @@ public void generateJDOFilter(Configuration conf, Table table,
            filterBuffer.append(" || ");
          }
 
-          rhs.generateJDOFilter(conf, table, params, filterBuffer);
+          rhs.generateJDOFilter(conf, params, filterBuffer, partitionKeys);
        }
        filterBuffer.append (") ");
      }
@@ -271,10 +267,10 @@ protected void accept(TreeVisitor visitor) throws MetaException {
     }
 
     @Override
-    public void generateJDOFilter(Configuration conf, Table table, Map<String, Object> params,
-        FilterBuilder filterBuilder) throws MetaException {
-      if (table != null) {
-        generateJDOFilterOverPartitions(conf, table, params, filterBuilder);
+    public void generateJDOFilter(Configuration conf, Map<String, Object> params,
+        FilterBuilder filterBuilder, List<FieldSchema> partitionKeys) throws MetaException {
+      if (partitionKeys != null) {
+        generateJDOFilterOverPartitions(conf, params, filterBuilder, partitionKeys);
      } else {
        generateJDOFilterOverTables(params, filterBuilder);
      }
@@ -344,16 +340,16 @@ private void generateJDOFilterGeneral(Map params,
      }
    }
 
-    private void generateJDOFilterOverPartitions(Configuration conf, Table table,
-        Map<String, Object> params, FilterBuilder filterBuilder) throws MetaException {
-      int partitionColumnCount = table.getPartitionKeys().size();
-      int partitionColumnIndex = getPartColIndexForFilter(table, filterBuilder);
+    private void generateJDOFilterOverPartitions(Configuration conf,
+        Map<String, Object> params, FilterBuilder filterBuilder, List<FieldSchema> partitionKeys) throws MetaException {
+      int partitionColumnCount = partitionKeys.size();
+      int partitionColumnIndex = getPartColIndexForFilter(partitionKeys, filterBuilder);
      if (filterBuilder.hasError()) return;
 
      boolean canPushDownIntegral =
          MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.INTEGER_JDO_PUSHDOWN);
      String valueAsString = getJdoFilterPushdownParam(
-          table, partitionColumnIndex, filterBuilder, canPushDownIntegral);
+          partitionColumnIndex, filterBuilder, canPushDownIntegral, partitionKeys);
      if (filterBuilder.hasError()) return;
 
      String paramName = PARAM_PREFIX + params.size();
@@ -365,7 +361,7 @@ private void generateJDOFilterOverPartitions(Configuration conf, Table table,
      boolean isOpEquals = operator == Operator.EQUALS;
      if (isOpEquals || operator == Operator.NOTEQUALS || operator == Operator.NOTEQUALS2) {
-        String partitionKey = table.getPartitionKeys().get(partitionColumnIndex).getName();
+        String partitionKey = partitionKeys.get(partitionColumnIndex).getName();
        makeFilterForEquals(partitionKey, valueAsString, paramName, params,
            partitionColumnIndex, partitionColumnCount, isOpEquals, filterBuilder);
        return;
@@ -424,22 +420,21 @@ public boolean canJdoUseStringsWithIntegral() {
     /**
      * Get partition column index in the table partition column list that
      * corresponds to the key that is being filtered on by this tree node.
-     * @param table The table.
+     * @param partitionKeys list of partition keys.
     * @param filterBuilder filter builder used to report error, if any.
     * @return The index.
     */
    public int getPartColIndexForFilter(
-        Table table, FilterBuilder filterBuilder) throws MetaException {
+        List<FieldSchema> partitionKeys, FilterBuilder filterBuilder) throws MetaException {
+      assert (partitionKeys.size() > 0);
      int partitionColumnIndex;
-      assert (table.getPartitionKeys().size() > 0);
-      for (partitionColumnIndex = 0; partitionColumnIndex < table.getPartitionKeys().size();
-          ++partitionColumnIndex) {
-        if (table.getPartitionKeys().get(partitionColumnIndex).getName().
-            equalsIgnoreCase(keyName)) {
+      for (partitionColumnIndex = 0; partitionColumnIndex < partitionKeys.size();
+          ++partitionColumnIndex) {
+        if (partitionKeys.get(partitionColumnIndex).getName().equalsIgnoreCase(keyName)) {
          break;
        }
      }
-      if( partitionColumnIndex == table.getPartitionKeys().size()) {
+      if( partitionColumnIndex == partitionKeys.size()) {
        filterBuilder.setError("Specified key <" + keyName +
            "> is not a partitioning key for the table");
        return -1;
@@ -451,15 +446,15 @@ public int getPartColIndexForFilter(
     /**
      * Validates and gets the query parameter for JDO filter pushdown based on the column
      * and the constant stored in this node.
-     * @param table The table.
+     * @param partitionKeys
     * @param partColIndex The index of the column to check.
     * @param filterBuilder filter builder used to report error, if any.
     * @return The parameter string.
     */
-    private String getJdoFilterPushdownParam(Table table, int partColIndex,
-        FilterBuilder filterBuilder, boolean canPushDownIntegral) throws MetaException {
+    private String getJdoFilterPushdownParam(int partColIndex,
+        FilterBuilder filterBuilder, boolean canPushDownIntegral, List<FieldSchema> partitionKeys) throws MetaException {
      boolean isIntegralSupported = canPushDownIntegral && canJdoUseStringsWithIntegral();
-      String colType = table.getPartitionKeys().get(partColIndex).getType();
+      String colType = partitionKeys.get(partColIndex).getType();
      // Can only support partitions whose types are string, or maybe integers
      if (!colType.equals(ColumnType.STRING_TYPE_NAME) &&
          (!isIntegralSupported || !ColumnType.IntegralTypes.contains(colType))) {
@@ -594,20 +589,20 @@ public void addLeafNode(LeafNode newNode) {
   }
 
   /** Generate the JDOQL filter for the given expression tree
-   * @param table the table being queried
   * @param params the input map which is updated with the
   *     the parameterized values. Keys are the parameter names and values
   *     are the parameter values
   * @param filterBuilder the filter builder to append to.
+   * @param partitionKeys
   */
-  public void generateJDOFilterFragment(Configuration conf, Table table,
-      Map<String, Object> params, FilterBuilder filterBuilder) throws MetaException {
+  public void generateJDOFilterFragment(Configuration conf,
+      Map<String, Object> params, FilterBuilder filterBuilder, List<FieldSchema> partitionKeys) throws MetaException {
    if (root == null) {
      return;
    }
 
    filterBuilder.append(" && ( ");
-    root.generateJDOFilter(conf, table, params, filterBuilder);
+    root.generateJDOFilter(conf, params, filterBuilder, partitionKeys);
    filterBuilder.append(" )");
  }
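
A minimal caller sketch of the reworked API, not part of the patch: it shows how a JDOQL filter fragment can now be generated from an explicit list of partition keys instead of a fetched Table object. The class name, the helper method buildJdoFilter, and the example "ds"/"hr" partition keys are illustrative assumptions; only the calls that appear in the diff above (ExpressionTree.FilterBuilder, generateJDOFilterFragment, PartFilterExprUtil.getFilterParser) come from the patched code.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;

// Hypothetical sketch exercising the new partition-key-based signature of
// ExpressionTree.generateJDOFilterFragment; it never loads a Table object.
public class PartitionFilterSketch {

  // Builds a JDOQL filter fragment for a table assumed to be partitioned by
  // "ds" and "hr"; returns null if the filter cannot be generated.
  static String buildJdoFilter(Configuration conf, String filterExpr) throws MetaException {
    List<FieldSchema> partitionKeys = new ArrayList<>();
    partitionKeys.add(new FieldSchema("ds", "string", null));
    partitionKeys.add(new FieldSchema("hr", "string", null));

    // Parse the filter string into an expression tree, as the call sites above do.
    ExpressionTree tree = PartFilterExprUtil.getFilterParser(filterExpr).tree;

    ExpressionTree.FilterBuilder filterBuilder = new ExpressionTree.FilterBuilder(true);
    Map<String, Object> params = new HashMap<>();
    // New signature: the partition keys are passed explicitly; a null list would
    // make the tree generate a filter over tables rather than partitions.
    tree.generateJDOFilterFragment(conf, params, filterBuilder, partitionKeys);
    return filterBuilder.hasError() ? null : filterBuilder.getFilter();
  }
}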