diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 4de038913a..d01e48d01d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -45,12 +45,14 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
@@ -108,6 +110,7 @@
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -1866,6 +1869,25 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
       boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcidIUDoperation,
       boolean hasFollowingStatsTask, Long writeId, int stmtId,
       boolean isInsertOverwrite) throws HiveException {
+
+    // Get the partition object if it already exists
+    Partition oldPart = getPartition(tbl, partSpec, false);
+
+    Partition newTPart = loadPartitionInternal(loadPath, tbl, partSpec, oldPart,
+        loadFileType, inheritTableSpecs,
+        inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, isAcidIUDoperation,
+        hasFollowingStatsTask, writeId, stmtId, isInsertOverwrite);
+
+    addPartitionToMetastore(newTPart, hasFollowingStatsTask, tbl);
+
+    return newTPart;
+  }
+
+  private Partition loadPartitionInternal(Path loadPath, Table tbl, Map<String, String> partSpec,
+      Partition oldPart, LoadFileType loadFileType, boolean inheritTableSpecs,
+      boolean inheritLocation, boolean isSkewedStoreAsSubdir,
+      boolean isSrcLocal, boolean isAcidIUDoperation, boolean hasFollowingStatsTask,
+      Long writeId, int stmtId, boolean isInsertOverwrite) throws HiveException {
     Path tblDataLocationPath = tbl.getDataLocation();
     boolean isMmTableWrite = AcidUtils.isInsertOnlyTable(tbl.getParameters());
     assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
@@ -1875,8 +1897,6 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_PARTITION);

-    // Get the partition object if it already exists
-    Partition oldPart = getPartition(tbl, partSpec, false);
     /**
      * Move files before creating the partition since down stream processes
      * check for existence of partition in metadata before accessing the data.
@@ -1885,7 +1905,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
      */
     Path oldPartPath = (oldPart != null) ? oldPart.getDataLocation() : null;

-    Path newPartPath = null;
+    Path newPartPath;

     if (inheritLocation) {
       newPartPath = genPartPathFromTable(tbl, partSpec, tblDataLocationPath);
@@ -1906,9 +1926,9 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
       }
     } else {
       newPartPath = oldPartPath == null
-        ? genPartPathFromTable(tbl, partSpec, tblDataLocationPath) : oldPartPath;
+          ? genPartPathFromTable(tbl, partSpec, tblDataLocationPath) : oldPartPath;
     }
-    List<Path> newFiles = Collections.synchronizedList(new ArrayList<Path>());
+    List<Path> newFiles = Collections.synchronizedList(new ArrayList<>());

     perfLogger.PerfLogBegin("MoveTask", PerfLogger.FILE_MOVES);
@@ -1936,7 +1956,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
       }
       if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
         Utilities.FILE_OP_LOGGER.trace("maybe deleting stuff from " + oldPartPath
-          + " (new " + newPartPath + ") for replace");
+            + " (new " + newPartPath + ") for replace");
       }
     } else {
       // Either a non-MM query, or a load into MM table from an external source.
@@ -1945,7 +1965,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
         assert !isAcidIUDoperation;
         // We will load into MM directory, and hide previous directories if needed.
         destPath = new Path(destPath, isInsertOverwrite
-          ? AcidUtils.baseDir(writeId) : AcidUtils.deltaSubdir(writeId, writeId, stmtId));
+            ? AcidUtils.baseDir(writeId) : AcidUtils.deltaSubdir(writeId, writeId, stmtId));
       }
       if (!isAcidIUDoperation && isFullAcidTable) {
         destPath = fixFullAcidPathForLoadData(loadFileType, destPath, writeId, stmtId, tbl);
       }
@@ -1963,20 +1983,20 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
         boolean needRecycle = !tbl.isTemporary()
             && ReplChangeManager.isSourceOfReplication(Hive.get().getDatabase(tbl.getDbName()));
         replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(), isSrcLocal,
-          isAutoPurge, newFiles, FileUtils.HIDDEN_FILES_PATH_FILTER, needRecycle, isManaged);
+            isAutoPurge, newFiles, FileUtils.HIDDEN_FILES_PATH_FILTER, needRecycle, isManaged);
       } else {
         FileSystem fs = destPath.getFileSystem(conf);
         copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
-          (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles,
-          tbl.getNumBuckets() > 0, isFullAcidTable, isManaged);
+            (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles,
+            tbl.getNumBuckets() > 0, isFullAcidTable, isManaged);
       }
     }
     perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES);
     Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
     alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
     validatePartition(newTPart);
-    AcidUtils.TableSnapshot tableSnapshot = null;
-    tableSnapshot = AcidUtils.getTableSnapshot(conf, newTPart.getTable(), true);
+    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTPart.getTable(), true);
     if (tableSnapshot != null) {
       newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
     }
@@ -2008,7 +2028,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
         SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo();
         /* Construct list bucketing location mappings from sub-directory name. */
         Map<List<String>, String> skewedColValueLocationMaps = constructListBucketingLocationMap(
-          newPartPath, skewedInfo);
+            newPartPath, skewedInfo);
         /* Add list bucketing location mappings. */
         skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps);
         newCreatedTpart.getSd().setSkewedInfo(skewedInfo);
@@ -2020,16 +2040,16 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
         newTPart.getTPartition().setParameters(new HashMap<String, String>());
         if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
           StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(),
-            MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
+              MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE);
         }
         // Note: we are creating a brand new the partition, so this is going to be valid for ACID.
         List<FileStatus> filesForStats = null;
         if (isTxnTable) {
           filesForStats = AcidUtils.getAcidFilesForStats(
-            newTPart.getTable(), newPartPath, conf, null);
+              newTPart.getTable(), newPartPath, conf, null);
         } else {
           filesForStats = HiveStatsUtils.getFileStatusRecurse(
-            newPartPath, -1, newPartPath.getFileSystem(conf));
+              newPartPath, -1, newPartPath.getFileSystem(conf));
         }
         if (filesForStats != null) {
           MetaStoreServerUtils.populateQuickStats(filesForStats, newTPart.getParameters());
@@ -2037,33 +2057,6 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
           // The ACID state is probably absent. Warning is logged in the get method.
           MetaStoreServerUtils.clearQuickStats(newTPart.getParameters());
         }
-      try {
-        LOG.debug("Adding new partition " + newTPart.getSpec());
-        getSynchronizedMSC().add_partition(newTPart.getTPartition());
-      } catch (AlreadyExistsException aee) {
-        // With multiple users concurrently issuing insert statements on the same partition has
-        // a side effect that some queries may not see a partition at the time when they're issued,
-        // but will realize the partition is actually there when it is trying to add such partition
-        // to the metastore and thus get AlreadyExistsException, because some earlier query just created it (race condition).
-        // For example, imagine such a table is created:
-        //  create table T (name char(50)) partitioned by (ds string);
-        // and the following two queries are launched at the same time, from different sessions:
-        //  insert into table T partition (ds) values ('Bob', 'today'); -- creates the partition 'today'
-        //  insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
-        // In that case, we want to retry with alterPartition.
-        LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
-        setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot);
-      } catch (Exception e) {
-        try {
-          final FileSystem newPathFileSystem = newPartPath.getFileSystem(this.getConf());
-          boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
-          final FileStatus status = newPathFileSystem.getFileStatus(newPartPath);
-          Hive.trashFiles(newPathFileSystem, new FileStatus[] {status}, this.getConf(), isAutoPurge);
-        } catch (IOException io) {
-          LOG.error("Could not delete partition directory contents after failed partition creation: ", io);
-        }
-        throw e;
-      }

       // For acid table, add the acid_write event with file list at the time of load itself. But
       // it should be done after partition is created.
@@ -2089,8 +2082,93 @@ public Partition loadPartition(Path loadPath, Table tbl, Map<String, String> par
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException(e);
     }
+
+  }
+
+  private void addPartitionToMetastore(Partition newTPart, boolean hasFollowingStatsTask,
+      Table tbl) throws HiveException {
+    try {
+      LOG.debug("Adding new partition " + newTPart.getSpec());
+      getSynchronizedMSC().add_partition(newTPart.getTPartition());
+    } catch (AlreadyExistsException aee) {
+      // With multiple users concurrently issuing insert statements on the same partition has
+      // a side effect that some queries may not see a partition at the time when they're issued,
+      // but will realize the partition is actually there when it is trying to add such partition
+      // to the metastore and thus get AlreadyExistsException, because some earlier query just created it (race condition).
+      // For example, imagine such a table is created:
+      //  create table T (name char(50)) partitioned by (ds string);
+      // and the following two queries are launched at the same time, from different sessions:
+      //  insert into table T partition (ds) values ('Bob', 'today'); -- creates the partition 'today'
+      //  insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
+      // In that case, we want to retry with alterPartition.
+      LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
+      AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTPart.getTable(), true);
+      try {
+        setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot);
+      } catch (TException e) {
+        LOG.error(StringUtils.stringifyException(e));
+        throw new HiveException(e);
+      }
+    } catch (Exception e) {
+      try {
+        final FileSystem newPathFileSystem = newTPart.getPartitionPath().getFileSystem(this.getConf());
+        boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
+        final FileStatus status = newPathFileSystem.getFileStatus(newTPart.getPartitionPath());
+        Hive.trashFiles(newPathFileSystem, new FileStatus[]{status}, this.getConf(), isAutoPurge);
+      } catch (IOException io) {
+        LOG.error("Could not delete partition directory contents after failed partition creation: ", io);
+      }
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+  }
+
+  private void addPartitionsToMetastore(List<Partition> partitions,
+      boolean hasFollowingStatsTask, Table tbl) throws HiveException {
+    try {
+      String debugMsg = "Adding new partitions ";
+      for (Partition partition : partitions) {
+        debugMsg += partition.getSpec() + " ";
+      }
+      LOG.debug(debugMsg);
+      getMSC().add_partitions(partitions.parallelStream().map(Partition::getTPartition).collect(Collectors.toList()));
+    } catch (AlreadyExistsException aee) {
+      // With multiple users concurrently issuing insert statements on the same partition has
+      // a side effect that some queries may not see a partition at the time when they're issued,
+      // but will realize the partition is actually there when it is trying to add such partition
+      // to the metastore and thus get AlreadyExistsException, because some earlier query just created it (race condition).
+      // For example, imagine such a table is created:
+      //  create table T (name char(50)) partitioned by (ds string);
+      // and the following two queries are launched at the same time, from different sessions:
+      //  insert into table T partition (ds) values ('Bob', 'today'); -- creates the partition 'today'
+      //  insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
+      // In that case, we want to retry with alterPartition.
+      LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
+      for (Partition partition : partitions) {
+        AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, partition.getTable(),
+            true);
+        try {
+          setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, partition, tableSnapshot);
+        } catch (TException e) {
+          LOG.error(StringUtils.stringifyException(e));
+          throw new HiveException(e);
+        }
+      }
+    } catch (Exception e) {
+      try {
+        for (Partition partition : partitions) {
+          final FileSystem newPathFileSystem = partition.getPartitionPath().getFileSystem(this.getConf());
+          boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
+          final FileStatus status = newPathFileSystem.getFileStatus(partition.getPartitionPath());
+          Hive.trashFiles(newPathFileSystem, new FileStatus[]{status}, this.getConf(), isAutoPurge);
+        }
+      } catch (IOException io) {
+        LOG.error("Could not delete partition directory contents after failed partition creation: ", io);
+      }
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+  }

   private static Path genPartPathFromTable(Table tbl, Map<String, String> partSpec,
       Path tblDataLocationPath) throws MetaException {
@@ -2375,130 +2453,128 @@ private void constructOneLBLocationMap(FileStatus fSta,
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);

-    final Map<Map<String, String>, Partition> partitionsMap =
-        Collections.synchronizedMap(new LinkedHashMap<Map<String, String>, Partition>());
-
-    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
-    final ExecutorService pool = Executors.newFixedThreadPool(poolSize,
-        new ThreadFactoryBuilder()
-            .setDaemon(true)
-            .setNameFormat("load-dynamic-partitions-%d")
-            .build());
-
     // Get all valid partition paths and existing partitions for them (if any)
     final Table tbl = getTable(tableName);
     final Set<Path> validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId,
-      AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite);
+        AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite);

-    final int partsToLoad = validPartitions.size();
-    final AtomicInteger partitionsLoaded = new AtomicInteger(0);
-
-    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0
-        && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
-    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
     final SessionState parentSession = SessionState.get();

+    class PartitionWithFullSpec {
+      private Partition partition;
+      private LinkedHashMap<String, String> fullPartSpec;
+    }

-    final List<Future<Void>> futures = Lists.newLinkedList();
-    // for each dynamically created DP directory, construct a full partition spec
-    // and load the partition based on that
-    final Map<Long, RawStore> rawStoreMap = new ConcurrentHashMap<>();
-    try {
-      for(final Path partPath : validPartitions) {
-        // generate a full partition specification
-        final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
-        if (!Warehouse.makeSpecFromName(
-            fullPartSpec, partPath, new HashSet<String>(partSpec.keySet()))) {
-          Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath);
-          continue;
-        }
-        futures.add(pool.submit(new Callable<Void>() {
-          @Override
-          public Void call() throws Exception {
-            try {
-              // move file would require session details (needCopy() invokes SessionState.get)
-              SessionState.setCurrentSessionState(parentSession);
-              LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
-
-              // load the partition
-              Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, loadFileType,
-                  true, false, numLB > 0, false, isAcid, hasFollowingStatsTask, writeId, stmtId,
-                  isInsertOverwrite);
-              partitionsMap.put(fullPartSpec, newPartition);
-
-              if (inPlaceEligible) {
-                synchronized (ps) {
-                  InPlaceUpdate.rePositionCursor(ps);
-                  partitionsLoaded.incrementAndGet();
-                  InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/"
-                      + partsToLoad + " partitions.");
-                }
-              }
-              return null;
-            } catch (Exception t) {
-              LOG.error("Exception when loading partition with parameters "
+    List<Callable<PartitionWithFullSpec>> tasks = Lists.newLinkedList();
+
+    PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partSpec,
+        conf.getInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 300));
+
+    // map valid partition paths to partitions
+    Iterator<Partition> iterator = partitionIterable.iterator();
+    Map<Path, Partition> pathToPartitionMap = Maps.newLinkedHashMap();
+    while (iterator.hasNext()) {
+      Partition partition = iterator.next();
+      Optional<Path> path = validPartitions.parallelStream().filter(partition.getPartitionPath()::equals).findFirst();
+      path.ifPresent(p -> {
+        pathToPartitionMap.put(p, partition);
+        validPartitions.remove(p);
+      });
+    }
+    validPartitions.forEach(path -> pathToPartitionMap.put(path, null));
+
+    for (final Path partPath : pathToPartitionMap.keySet()) {
+      final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
+      if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet<>(partSpec.keySet()))) {
+        Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath);
+        continue;
+      }
+      tasks.add(() -> {
+        try {
+          // move file would require session details (needCopy() invokes SessionState.get)
+          SessionState.setCurrentSessionState(parentSession);
+          LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
+
+          Partition oldPartition = pathToPartitionMap.get(partPath);
+
+          PartitionWithFullSpec result = new PartitionWithFullSpec();
+          // load the partition
+          result.partition = loadPartitionInternal(partPath, tbl, fullPartSpec, oldPartition,
+              loadFileType, true, false, numLB > 0, false, isAcid,
+              hasFollowingStatsTask, writeId, stmtId, isInsertOverwrite);
+          result.fullPartSpec = fullPartSpec;
+          return result;
+        } catch (Exception e) {
+          LOG.error("Exception when loading partition with parameters "
               + " partPath=" + partPath + ", "
               + " table=" + tbl.getTableName() + ", "
               + " partSpec=" + fullPartSpec + ", "
               + " loadFileType=" + loadFileType.toString() + ", "
               + " listBucketingLevel=" + numLB + ", "
               + " isAcid=" + isAcid + ", "
-              + " hasFollowingStatsTask=" + hasFollowingStatsTask, t);
-              throw t;
-            } finally {
-              // Add embedded rawstore, so we can cleanup later to avoid memory leak
-              if (getMSC().isLocalMetaStore()) {
-                Long threadId = Thread.currentThread().getId();
-                RawStore threadLocalRawStore = HiveMetaStore.HMSHandler.getRawStore();
-                if (threadLocalRawStore == null) {
-                  // If the thread local rawStore is already cleaned by current thread, then remove from rawStoreMap.
-                  rawStoreMap.remove(threadId);
-                } else {
-                  // If same thread is re-used, then need to cleanup the latest thread local instance of rawStore.
-                  // So, overwrite the old one if exists in rawStoreMap.
-                  rawStoreMap.put(threadId, threadLocalRawStore);
-                }
-              }
-            }
-          }
-        }));
-      }
-      pool.shutdown();
-      LOG.debug("Number of partitions to be added is " + futures.size());
+              + " hasFollowingStatsTask=" + hasFollowingStatsTask, e);
+          throw e;
+        }
+      });
+    }
+
+    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
+    ExecutorService executor = Executors.newFixedThreadPool(poolSize,
+        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());

-      for (Future future : futures) {
-        future.get();
+    List<Future<PartitionWithFullSpec>> futures = Lists.newLinkedList();
+    Map<Map<String, String>, Partition> partitionsMap =
+        Collections.synchronizedMap(new LinkedHashMap<>());
+    try {
+      futures = executor.invokeAll(tasks);
+      LOG.debug("Number of partitions to be added is " + futures.size());
+      List<Partition> partitions = Lists.newArrayList();
+      for (Future<PartitionWithFullSpec> future : futures) {
+        PartitionWithFullSpec partitionWithFullSpec = future.get();
+        partitions.add(partitionWithFullSpec.partition);
+        partitionsMap.put(partitionWithFullSpec.fullPartSpec, partitionWithFullSpec.partition);
       }
+      addPartitionsToMetastore(partitions, hasFollowingStatsTask, tbl);
     } catch (InterruptedException | ExecutionException e) {
-      LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
-      //cancel other futures
-      for (Future future : futures) {
-        future.cancel(true);
-      }
-      throw new HiveException("Exception when loading "
-          + partsToLoad + " in table " + tbl.getTableName()
-          + " with loadPath=" + loadPath, e);
+      throw new HiveException("Exception when loading " + validPartitions.size()
+          + " in table " + tbl.getTableName()
+          + " with loadPath=" + loadPath, e);
+    } catch (Exception e) {
+      LOG.error("Exception when loading partitions with parameters "
+          + " partPaths=" + validPartitions + ", "
+          + " table=" + tbl.getTableName() + ", "
+          + " partSpec=" + partSpec + ", "
+          + " loadFileType=" + loadFileType.toString() + ", "
+          + " listBucketingLevel=" + numLB + ", "
+          + " isAcid=" + isAcid + ", "
+          + " hasFollowingStatsTask=" + hasFollowingStatsTask, e);
+      throw e;
     } finally {
-      rawStoreMap.forEach((k, rs) -> rs.shutdown());
+      LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
+      executor.shutdownNow();
+    }
+
+    if (conf.getLong("fs.trash.interval", 0) <= 0
+        && InPlaceUpdate.canRenderInPlace(conf)
+        && !SessionState.getConsole().getIsSilent()) {
+      PrintStream ps = SessionState.getConsole().getInfoStream();
+      InPlaceUpdate.rePositionCursor(ps);
+      InPlaceUpdate.reprintLine(ps,
+          "Loaded : " + futures.size() + "/" + validPartitions.size() + " partitions.");
     }

     try {
       if (isAcid) {
-        List<String> partNames = new ArrayList<>(partitionsMap.size());
-        for (Partition p : partitionsMap.values()) {
-          partNames.add(p.getName());
-        }
+        List<String> partNames =
+            partitionsMap.values().parallelStream().map(Partition::getName).collect(Collectors.toList());
         getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId,
-            tbl.getDbName(), tbl.getTableName(), partNames,
-            AcidUtils.toDataOperationType(operation));
+            tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
       }
       LOG.info("Loaded " + partitionsMap.size() + " partitions");

       perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS);

       return partitionsMap;
     } catch (TException te) {
       throw new HiveException("Exception updating metastore for acid table "
           + tableName + " with partitions " + partitionsMap.values(), te);
     }
   }
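
Note: the rewritten loadDynamicPartitions above builds the complete task list first and hands it to ExecutorService.invokeAll, instead of submitting one Callable per partition as it iterates. A minimal, self-contained sketch of that pattern (plain JDK only; the class name and task bodies are made up for illustration and are not Hive code):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllSketch {
  public static void main(String[] args) throws Exception {
    // One Callable per "partition directory"; the real work is faked with a string result.
    List<Callable<String>> tasks = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
      final int dir = i;
      tasks.add(() -> "loaded dp_dir_" + dir);
    }

    ExecutorService executor = Executors.newFixedThreadPool(2);
    try {
      // invokeAll blocks until every task has completed (or failed).
      List<Future<String>> futures = executor.invokeAll(tasks);
      for (Future<String> future : futures) {
        // get() rethrows any task failure wrapped in an ExecutionException.
        System.out.println(future.get());
      }
    } finally {
      executor.shutdownNow();
    }
  }
}

Because invokeAll returns only after every task has finished, the results can be collected in one pass and the single batched metastore call issued afterwards, which is exactly how the patch sequences loadPartitionInternal and addPartitionsToMetastore.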
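The other behavioral change is that partitions are created in the metastore in one batch: addPartitionsToMetastore issues a single add_partitions call and only falls back to altering each partition when a concurrent writer created one first. A compilable sketch of that fallback shape, written against a hypothetical PartitionClient interface rather than Hive's real metastore client API:

import java.util.List;

/** Hypothetical stand-in for a metastore client; not Hive's real IMetaStoreClient API. */
interface PartitionClient {
  void addPartitions(List<String> parts) throws Exception;
  void alterPartition(String part) throws Exception;
}

/** Thrown by the hypothetical client when a partition already exists. */
class PartitionAlreadyExistsException extends Exception {
}

class BatchAddSketch {
  private final PartitionClient client;

  BatchAddSketch(PartitionClient client) {
    this.client = client;
  }

  /** Try one bulk add; if some partition already exists, fall back to altering each one. */
  void addOrAlter(List<String> parts) throws Exception {
    try {
      // Single round trip for the whole batch.
      client.addPartitions(parts);
    } catch (PartitionAlreadyExistsException e) {
      // A concurrent writer won the race: update every partition instead.
      for (String part : parts) {
        client.alterPartition(part);
      }
    }
  }
}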