diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 755654c..9be4ba6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -935,11 +935,11 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException
     Table baseTbl = db.getTable(baseTableName);
     if (baseTbl.isPartitioned()) {
-      List<Partition> baseParts;
+      List<Partition> baseParts = new ArrayList<Partition>();
       if (alterIndex.getSpec() != null) {
-        baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
+        baseParts.addAll(db.getAllPartitionsOf(baseTbl, alterIndex.getSpec()));
       } else {
-        baseParts = db.getPartitions(baseTbl);
+        baseParts.addAll(db.getAllPartitionsOf(baseTbl));
       }
       if (baseParts != null) {
         for (Partition p : baseParts) {
@@ -1311,7 +1311,8 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
     Map<String, String> partSpec = simpleDesc.getPartSpec();
     PartSpecInfo partSpecInfo = PartSpecInfo.create(tbl, partSpec);
-    List<Partition> partitions = db.getPartitions(tbl, partSpec);
+    List<Partition> partitions = new ArrayList<Partition>();
+    partitions.addAll(db.getAllPartitionsOf(tbl, partSpec));

     Path originalDir = null;

@@ -1547,7 +1548,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
     Map<String, String> partSpec = simpleDesc.getPartSpec();
     PartSpecInfo partSpecInfo = PartSpecInfo.create(tbl, partSpec);
-    List<Partition> partitions = db.getPartitions(tbl, partSpec);
+    Set<Partition> partitions = db.getAllPartitionsOf(tbl, partSpec);

     int partSpecLevel = partSpec.size();

@@ -1571,7 +1572,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
       }
       originalDir = partSpecInfo.createPath(tbl);
     } else {
-      Partition p = partitions.get(0);
+      Partition p = partitions.iterator().next();
       if(ArchiveUtils.isArchived(p)) {
         originalDir = new Path(getOriginalLocation(p));
       } else {
@@ -1781,13 +1782,13 @@ private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
       }
     } else {
       Map<String, String> partSpec = desc.getPartSpec();
-      List<Partition> partitions = db.getPartitions(tbl, partSpec);
+      Set<Partition> partitions = db.getAllPartitionsOf(tbl, partSpec);
       if (partitions.size() > 1) {
         throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS);
       } else if (partitions.size() == 0) {
         throw new HiveException(ErrorMsg.INVALID_PARTITION_SPEC);
       }
-      partName = partitions.get(0).getName();
+      partName = partitions.iterator().next().getName();
     }
     db.compact(tbl.getDbName(), tbl.getTableName(), partName, desc.getCompactionType(), desc.getProps());
     console.printInfo("Compaction enqueued.");
@@ -3279,7 +3280,8 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
       } else {
         // DDLSemanticAnalyzer has already checked if partial partition specs are allowed,
         // thus we should not need to check it here.
-        allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec());
+        allPartitions = new ArrayList<Partition>();
+        allPartitions.addAll(db.getAllPartitionsOf(tbl, alterTbl.getPartSpec()));
       }
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index ee6c564..53aef4f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2286,6 +2286,31 @@ public boolean dropPartition(String dbName, String tableName, List<String> partV
   }

   /**
+   * Get all the partitions matching the given partial specification; unlike {@link #getPartitions(Table)}, does not include auth.
+   * @param tbl table for which partitions are needed
+   * @return set of partition objects
+   */
+  public Set<Partition> getAllPartitionsOf(Table tbl, Map<String, String> partialPartSpec) throws HiveException {
+    if (!tbl.isPartitioned()) {
+      throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
+    }
+
+    List<org.apache.hadoop.hive.metastore.api.Partition> tParts;
+    List<String> partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec);
+    try {
+      tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), partialPvals, (short) -1);
+    } catch (Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+    Set<Partition> parts = new LinkedHashSet<Partition>(tParts.size());
+    for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) {
+      parts.add(new Partition(tbl, tpart));
+    }
+    return parts;
+  }
+
+  /**
    * get all the partitions of the table that matches the given partial
    * specification. partition columns whose value is can be anything should be
    * an empty string.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 10fa561..af9cec2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -197,7 +197,8 @@ void checkTable(String dbName, String tableName,
     if (table.isPartitioned()) {
       if (partitions == null || partitions.isEmpty()) {
         // no partitions specified, let's get all
-        parts = hive.getPartitions(table);
+        parts = new ArrayList<Partition>();
+        parts.addAll(hive.getAllPartitionsOf(table));
       } else {
         // we're interested in specific partitions,
         // don't check for any others
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 4a9db9e..3d6c186 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -1704,6 +1704,22 @@ protected Partition getPartition(Table table, Map<String, String> partSpec,
     return partitions;
   }

+  protected List<Partition> getPartitionsWithoutAuth(Table table, Map<String, String> partSpec,
+      boolean throwException) throws SemanticException {
+    List<Partition> partitions = new ArrayList<Partition>();
+    try {
+      Set<Partition> partitionsSet = partSpec == null ? db.getAllPartitionsOf(table) :
+          db.getAllPartitionsOf(table, partSpec);
+      partitions.addAll(partitionsSet);
+    } catch (Exception e) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
+    }
+    if (partitions.isEmpty() && throwException) {
+      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
+    }
+    return partitions;
+  }
+
   protected String toMessage(ErrorMsg message, Object detail) {
     return detail == null ?
        message.getMsg() : message.getMsg(detail.toString());
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 5b32f56..beee44c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -734,7 +734,7 @@ private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws Se
       throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
     }
     // check if source partition exists
-    getPartitions(sourceTable, partSpecs, true);
+    getPartitionsWithoutAuth(sourceTable, partSpecs, true);

     // Verify that the partitions specified are continuous
     // If a subpartition value is specified without specifying a partition's value
@@ -746,7 +746,7 @@ private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws Se
     }
     List<Partition> destPartitions = null;
     try {
-      destPartitions = getPartitions(destTable, partSpecs, true);
+      destPartitions = getPartitionsWithoutAuth(destTable, partSpecs, true);
     } catch (SemanticException ex) {
       // We should expect a semantic exception being throw as this partition
       // should not be present.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
index 653b657..09f9294 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
@@ -143,7 +143,7 @@ private boolean containsPartition(Index index, Map<String, String> partSpec)
       throws HiveException {
     String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
     Table indexTable = hive.getTable(qualified[0], qualified[1]);
-    List<Partition> parts = hive.getPartitions(indexTable, partSpec);
+    Set<Partition> parts = hive.getAllPartitionsOf(indexTable, partSpec);
     return (parts == null || parts.size() == 0);
   }
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
index 5b8ec60..dc37da2 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java
@@ -223,7 +223,8 @@ public void testPartitionsCheck() throws HiveException, MetaException,
     assertEquals(Collections.emptyList(), result.getPartitionsNotOnFs());
     assertEquals(Collections.emptyList(), result.getPartitionsNotInMs());

-    List<Partition> partitions = hive.getPartitions(table);
+    List<Partition> partitions = new ArrayList<Partition>();
+    partitions.addAll(hive.getAllPartitionsOf(table));
     assertEquals(2, partitions.size());
     Partition partToRemove = partitions.get(0);
     Path partToRemovePath = partToRemove.getDataLocation();
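
Reviewer note (not part of the patch): a minimal sketch of how a call site would use the new Hive.getAllPartitionsOf(Table, Map) overload. The names db, tbl, and the ds/hr partition columns below are hypothetical, and the usual java.util imports are assumed. The point of the change is that getAllPartitionsOf skips fetching the per-partition authorization information that getPartitions includes, and it returns a LinkedHashSet, which keeps the metastore's return order but has no positional access; that is why call sites that previously did partitions.get(0) now do partitions.iterator().next().

    // Sketch only: assumes `db` is an initialized org.apache.hadoop.hive.ql.metadata.Hive
    // and `tbl` is a table partitioned on hypothetical columns (ds, hr).
    Map<String, String> partialSpec = new LinkedHashMap<String, String>();
    partialSpec.put("ds", "2014-01-01");  // pin the first partition column
    // "hr" is left unspecified; MetaStoreUtils.getPvals substitutes "" for it,
    // which the metastore treats as a wildcard, so every hr under that ds matches.
    Set<Partition> parts = db.getAllPartitionsOf(tbl, partialSpec);
    for (Partition p : parts) {
      System.out.println(p.getName());  // no per-partition auth lookup happened here
    }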