diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 73f185a1f3..028bc684ca 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2456,6 +2456,8 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "org.apache.hadoop.hive.serde2.avro.AvroSerDe",
         "The comma-separated list of SerDe classes that are considered when enhancing table-properties \n" +
             "during logical optimization."),
+    HIVE_COMPILER_PARTITION_SPEC_API("hive.compiler.partition.spec.api", true,
+        "Whether to use the partition spec API (get_partitions_with_specs) instead of get_partitions_by_expr"),
 
     // CTE
     HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index f51c0fc871..39972955a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2121,6 +2121,9 @@ public static String formatBinaryString(byte[] array, int start, int length) {
   }
 
   public static List<String> getColumnNamesFromSortCols(List<Order> sortCols) {
+    if (sortCols == null) {
+      return Collections.emptyList();
+    }
     List<String> names = new ArrayList<String>();
     for (Order o : sortCols) {
       names.add(o.getCol());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 3b0b56d0bd..332d1ef8a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -116,63 +116,7 @@
 import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
-import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
@@ -3863,6 +3807,58 @@ public boolean getPartitionsByExpr(Table tbl, ExprNodeGenericFuncDesc expr, HiveConf conf,
     return hasUnknownParts;
   }
 
+  public boolean getPartitionsByPartSpec(Table tbl, ExprNodeGenericFuncDesc expr, HiveConf conf,
+      List<Partition> result) throws HiveException, TException {
+    GetPartitionsFilterSpec filterSpec = new GetPartitionsFilterSpec();
+    filterSpec.setFilterMode(PartitionFilterMode.BY_EXPR);
+    filterSpec.setFilters(Arrays.asList(expr.getExprString()));
+
+    // create a projection spec so that only the fields needed to rebuild a Partition are fetched
+    GetPartitionsProjectionSpec projSpec = new GetPartitionsProjectionSpec();
+    projSpec.setFieldList(
+        Arrays.asList("dbName", "tableName", "catName", "parameters", "values", "sd"));
+    projSpec.setIncludeParamKeyPattern("%");
+    GetPartitionsRequest request = new GetPartitionsRequest();
+    request.setTblName(tbl.getTableName());
+    request.setDbName(tbl.getDbName());
+    request.setProjectionSpec(projSpec);
+    request.setFilterSpec(filterSpec);
+    PerfLogger perfLogger = SessionState.getPerfLogger();
+    perfLogger.PerfLogBegin("fetch partitions", "get_partitions_by_spec");
+    GetPartitionsResponse response = getMSC().getPartitionsWithSpecs(request);
+    perfLogger.PerfLogEnd("fetch partitions", "get_partitions_by_spec");
+
+    perfLogger.PerfLogBegin("convert partition spec to partitions", "get_partitions_by_spec");
to partitions", "get_partitions_by_spec"); + result.addAll(convertFromPartSpec(response, tbl)); + perfLogger.PerfLogEnd("convert partition spec to partitions", "get_partitions_by_spec"); + return true; + } + + private List convertFromPartSpec(GetPartitionsResponse response, Table tbl) + throws HiveException { + List partitionList = new ArrayList<>(); + List metastorePart = new ArrayList<>(); + PartitionSpec partitionSpec = response.getPartitionSpecIterator().next(); + for(PartitionWithoutSD partitionWithoutSD:partitionSpec.getSharedSDPartitionSpec().getPartitions()) { + org.apache.hadoop.hive.metastore.api.Partition part = new org.apache.hadoop.hive.metastore.api.Partition(); + part.setTableName(partitionSpec.getTableName()); + part.setDbName(partitionSpec.getDbName()); + part.setCatName(partitionSpec.getCatName()); + part.setCreateTime(partitionWithoutSD.getCreateTime()); + part.setLastAccessTime(partitionWithoutSD.getLastAccessTime()); + part.setParameters(partitionWithoutSD.getParameters()); + part.setPrivileges(partitionWithoutSD.getPrivileges()); + part.setSd(partitionSpec.getSharedSDPartitionSpec().getSd()); + part.setValues(partitionWithoutSD.getValues()); + part.setWriteId(partitionSpec.getWriteId()); + metastorePart.add(part); + } + return convertFromMetastore(tbl, metastorePart); + } + /** * Get a number of Partitions by filter. * @param tbl The table containing the partitions. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index 673d8580d5..16930f1791 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -451,8 +451,15 @@ private static PrunedPartitionList getPartitionsFromServer(Table tab, final Stri if (!doEvalClientSide) { perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING); try { - hasUnknownPartitions = Hive.get().getPartitionsByExpr( - tab, compactExpr, conf, partitions); + boolean useNewApi = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPILER_PARTITION_SPEC_API); + if(useNewApi) { + hasUnknownPartitions = + Hive.get().getPartitionsByPartSpec(tab, compactExpr, conf, partitions); + + } else { + hasUnknownPartitions = + Hive.get().getPartitionsByExpr(tab, compactExpr, conf, partitions); + } } catch (IMetaStoreClient.IncompatibleMetastoreException ime) { // TODO: backward compat for Hive <= 0.12. Can be removed later. LOG.warn("Metastore doesn't support getPartitionsByExpr", ime);