diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e577f5e..c0cff98 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -72,7 +72,10 @@ import org.apache.calcite.rel.core.Project; import org.apache.calcite.rel.core.TableScan; import org.apache.calcite.rex.RexBuilder; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FilenameUtils; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; @@ -155,7 +158,7 @@ @SuppressWarnings({"deprecation", "rawtypes"}) public class Hive { - static final private Logger LOG = LoggerFactory.getLogger("hive.ql.metadata.Hive"); + static final private Logger LOG = LoggerFactory.getLogger(Hive.class); private HiveConf conf = null; private IMetaStoreClient metaStoreClient; @@ -218,7 +221,6 @@ private void registerAllFunctionsOnce() throws HiveException { reloadFunctions(); didRegisterAllFuncs.compareAndSet(REG_FUNCS_PENDING, REG_FUNCS_DONE); } catch (Exception e) { - LOG.warn("Failed to register all functions.", e); didRegisterAllFuncs.compareAndSet(REG_FUNCS_PENDING, REG_FUNCS_NO); throw new HiveException(e); } finally { @@ -307,8 +309,9 @@ private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFast private static Hive create(HiveConf c, boolean needsRefresh, Hive db, boolean doRegisterAllFns) throws HiveException { if (db != null) { - LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh + - ", db.isCurrentUserOwner = " + db.isCurrentUserOwner()); + LOG.debug( + "Creating new db. db = {}, needsRefresh = {}, db.isCurrentUserOwner = {}", + db, needsRefresh, db.isCurrentUserOwner()); db.close(); } closeCurrent(); @@ -1050,8 +1053,6 @@ public void dropTable(String dbName, String tableName, boolean deleteData, } } - - /** * Truncates the table/partition as per specifications. Just trash the data files * @@ -1164,7 +1165,7 @@ public Table getTable(final String dbName, final String tableName, public Table getTable(final String dbName, final String tableName, boolean throwException, boolean checkTransactional) throws HiveException { - if (tableName == null || tableName.equals("")) { + if (org.apache.commons.lang.StringUtils.isEmpty(tableName)) { throw new HiveException("empty table creation??"); } @@ -1187,7 +1188,6 @@ public Table getTable(final String dbName, final String tableName, } } catch (NoSuchObjectException e) { if (throwException) { - LOG.error("Table " + dbName + "." + tableName + " not found: " + e.getMessage()); throw new InvalidTableException(tableName); } return null; @@ -1802,7 +1802,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par if (inheritLocation) { newPartPath = genPartPathFromTable(tbl, partSpec, tblDataLocationPath); - if(oldPart != null) { + if (oldPart != null) { /* * If we are moving the partition across filesystem boundaries * inherit from the table properties. Otherwise (same filesystem) use the @@ -1838,17 +1838,15 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par // Note: this assumes both paths are qualified; which they are, currently. if ((isMmTableWrite || isFullAcidTable) && loadPath.equals(newPartPath)) { // MM insert query, move itself is a no-op. 
- if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("not moving " + loadPath + " to " + newPartPath + " (MM)"); - } + Utilities.FILE_OP_LOGGER.trace("not moving {} to {} (MM)", loadPath, + newPartPath); assert !isAcidIUDoperation; if (newFiles != null) { listFilesCreatedByQuery(loadPath, writeId, stmtId, isMmTableWrite ? isInsertOverwrite : false, newFiles); } - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("maybe deleting stuff from " + oldPartPath - + " (new " + newPartPath + ") for replace"); - } + Utilities.FILE_OP_LOGGER.trace( + "maybe deleting stuff from {} (new {}) for replace", oldPartPath, + newPartPath); } else { // Either a non-MM query, or a load into MM table from an external source. Path destPath = newPartPath; @@ -1861,9 +1859,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par if (!isAcidIUDoperation && isFullAcidTable) { destPath = fixFullAcidPathForLoadData(loadFileType, destPath, writeId, stmtId, tbl); } - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("moving " + loadPath + " to " + destPath); - } + Utilities.FILE_OP_LOGGER.trace("moving {} to {}", loadPath, destPath); boolean isManaged = tbl.getTableType() == TableType.MANAGED_TABLE; // TODO: why is "&& !isAcidIUDoperation" needed here? @@ -1949,7 +1945,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par MetaStoreUtils.clearQuickStats(newTPart.getParameters()); } try { - LOG.debug("Adding new partition " + newTPart.getSpec()); + LOG.debug("Adding new partition {}", newTPart.getSpec()); getSynchronizedMSC().add_partition(newTPart.getTPartition()); } catch (AlreadyExistsException aee) { // With multiple users concurrently issuing insert statements on the same partition has @@ -1988,21 +1984,16 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION); return newTPart; } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } catch (MetaException e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } catch (InvalidOperationException e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } - private static Path genPartPathFromTable(Table tbl, Map partSpec, Path tblDataLocationPath) throws MetaException { Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartPath(partSpec)); @@ -2033,8 +2024,8 @@ private Path fixFullAcidPathForLoadData(LoadFileType loadFileType, Path destPath } try { FileSystem fs = tbl.getDataLocation().getFileSystem(SessionState.getSessionConf()); - if(!FileUtils.mkdir(fs, destPath, conf)) { - LOG.warn(destPath + " already exists?!?!"); + if (!FileUtils.mkdir(fs, destPath, conf)) { + LOG.warn("{} already exists", destPath); } } catch (IOException e) { throw new HiveException("load: error while creating " + destPath + ";loadFileType=" + loadFileType, e); @@ -2054,7 +2045,7 @@ private void listFilesInsideAcidDirectory(Path acidDir, FileSystem srcFs, List
, String> skewedColValueLocationMaps, Path newPartPath, SkewedInfo skewedInfo) throws IOException { /* Base Case. It's leaf. */ - if (!fSta.isDir()) { - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("Processing LB leaf " + fSta.getPath()); - } + if (!fSta.isDirectory()) { + Utilities.FILE_OP_LOGGER.trace("Processing LB leaf {}", fSta); + /* construct one location map if not exists. */ constructOneLBLocationMap(fSta, skewedColValueLocationMaps, newPartPath, skewedInfo); return; @@ -2124,9 +2113,7 @@ private void walkDirTree(FileStatus fSta, FileSystem fSys, /* dfs. */ FileStatus[] children = fSys.listStatus(fSta.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); if (children != null) { - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("Processing LB dir " + fSta.getPath()); - } + Utilities.FILE_OP_LOGGER.trace("Processing LB dir {}", fSta); for (FileStatus child : children) { walkDirTree(child, fSys, skewedColValueLocationMaps, newPartPath, skewedInfo); } @@ -2175,10 +2162,11 @@ private void constructOneLBLocationMap(FileStatus fSta, for (int i = 0; i < (dirNames.length - dirsToTake); ++i) { lbdPath = lbdPath.getParent(); } - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("Saving LB location " + lbdPath + " based on " - + colCount + " keys and " + fSta.getPath()); - } + + Utilities.FILE_OP_LOGGER.trace( + "Saving LB location {} based on {} keys and {}", lbdPath, colCount, + fSta); + if ((skewedValue.size() > 0) && (skewedValue.size() == colCount) && !skewedColValueLocationMaps.containsKey(skewedValue)) { skewedColValueLocationMaps.put(skewedValue, lbdPath.toString()); @@ -2238,9 +2226,7 @@ private void constructOneLBLocationMap(FileStatus fSta, fs, loadPath, numDP, null, writeId, -1, conf, isInsertOverwrite); for (Path p : leafStatus) { Path dpPath = p.getParent(); // Skip the MM directory that we have found. - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("Found DP " + dpPath); - } + Utilities.FILE_OP_LOGGER.trace("Found DP {}", dpPath); validPartitions.add(dpPath); } } @@ -2366,7 +2352,7 @@ public Void call() throws Exception { })); } pool.shutdown(); - LOG.debug("Number of partitions to be added is " + futures.size()); + LOG.debug("Number of partitions to be added is {}", futures.size()); for (Future future : futures) { future.get(); @@ -2374,7 +2360,7 @@ public Void call() throws Exception { rawStoreMap.forEach((k, rs) -> rs.shutdown()); } catch (InterruptedException | ExecutionException e) { - LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); + LOG.debug("Cancelling {} dynamic loading tasks", futures.size()); //cancel other futures for (Future future : futures) { future.cancel(true); @@ -2394,7 +2380,7 @@ public Void call() throws Exception { tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation)); } - LOG.info("Loaded " + partitionsMap.size() + " partitions"); + LOG.info("Loaded {} partitions", partitionsMap.size()); perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_DYNAMIC_PARTITIONS); @@ -2453,10 +2439,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType * and avoid the 'move' operation. Since MoveTask does other things, setting 'loadPath' to be * the table/partition path indicates that the 'file move' part of MoveTask is not needed. 
*/ - if (Utilities.FILE_OP_LOGGER.isDebugEnabled()) { - Utilities.FILE_OP_LOGGER.debug( - "not moving " + loadPath + " to " + tbl.getPath() + " (MM)"); - } + Utilities.FILE_OP_LOGGER.debug("not moving {} to {} (MM)", loadPath, tbl); //new files list is required only for event notification. if (newFiles != null) { @@ -2475,8 +2458,8 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType if (!isAcidIUDoperation && isFullAcidTable) { destPath = fixFullAcidPathForLoadData(loadFileType, destPath, writeId, stmtId, tbl); } - Utilities.FILE_OP_LOGGER.debug("moving " + loadPath + " to " + tblPath - + " (replace = " + loadFileType + ")"); + Utilities.FILE_OP_LOGGER.debug("moving {} to {} (replace = {})", loadPath, + tblPath, loadFileType); perfLogger.PerfLogBegin("MoveTask", PerfLogger.FILE_MOVES); @@ -2484,7 +2467,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType if (loadFileType == LoadFileType.REPLACE_ALL && !isTxnTable) { //for fullAcid we don't want to delete any files even for OVERWRITE see HIVE-14988/HIVE-17361 - boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge")); + boolean isAutopurge = Boolean.parseBoolean(tbl.getProperty("auto.purge")); boolean needRecycle = !tbl.isTemporary() && ReplChangeManager.isSourceOfReplication(Hive.get().getDatabase(tbl.getDbName())); replaceFiles(tblPath, loadPath, destPath, tblPath, conf, isSrcLocal, isAutopurge, @@ -2520,7 +2503,6 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); } } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } @@ -2561,7 +2543,6 @@ public Partition createPartition(Table tbl, Map partSpec) throws part.setWriteId(tableSnapshot != null ? 
tableSnapshot.getWriteId() : 0); return new Partition(tbl, getMSC().add_partition(part)); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -2624,7 +2605,6 @@ public Partition createPartition(Table tbl, Map partSpec) throws } } } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } return out; @@ -2715,14 +2695,12 @@ public Partition getPartition(Table tbl, Map partSpec, // getPartition() throws NoSuchObjectException to indicate null partition tpart = null; } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } try { if (forceCreate) { if (tpart == null) { - LOG.debug("creating partition for table " + tbl.getTableName() - + " with partition spec : " + partSpec); + LOG.debug("creating partition for table {} with partition spec {}", tbl, partSpec); try { tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals); } catch (AlreadyExistsException aee) { @@ -2758,17 +2736,15 @@ public Partition getPartition(Table tbl, Map partSpec, return null; } } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } return new Partition(tbl, tpart); } - private void alterPartitionSpec(Table tbl, - Map partSpec, - org.apache.hadoop.hive.metastore.api.Partition tpart, - boolean inheritTableSpecs, - String partPath) throws HiveException, InvalidOperationException { + private void alterPartitionSpec(Table tbl, Map partSpec, + org.apache.hadoop.hive.metastore.api.Partition tpart, + boolean inheritTableSpecs, String partPath) + throws HiveException, InvalidOperationException { alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath); String fullName = tbl.getTableName(); @@ -2783,8 +2759,8 @@ private void alterPartitionSpecInMemory(Table tbl, org.apache.hadoop.hive.metastore.api.Partition tpart, boolean inheritTableSpecs, String partPath) throws HiveException, InvalidOperationException { - LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : " - + partSpec); + LOG.debug("altering partition for table {} with partition spec : {}", tbl, + partSpec); if (inheritTableSpecs) { tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat()); tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat()); @@ -2795,8 +2771,9 @@ private void alterPartitionSpecInMemory(Table tbl, tpart.getSd().setNumBuckets(tbl.getNumBuckets()); tpart.getSd().setSortCols(tbl.getSortCols()); } - if (partPath == null || partPath.trim().equals("")) { - throw new HiveException("new partition path should not be null or empty."); + if (org.apache.commons.lang.StringUtils.isBlank(partPath)) { + throw new HiveException( + "new partition path should not be null or empty."); } tpart.getSd().setLocation(partPath); } @@ -2809,17 +2786,19 @@ private void addWriteNotificationLog(Table tbl, Map partitionSpe } if (tbl.isTemporary()) { - LOG.debug("write notification log is ignored as " + tbl.getTableName() + " is temporary : " + writeId); + LOG.debug("write notification log is ignored as {} is temporary : {}", + tbl, writeId); return; } - if (newFiles == null || newFiles.isEmpty()) { + if (CollectionUtils.isEmpty(newFiles)) { LOG.debug("write notification log is ignored as file list is empty"); return; } - LOG.debug("adding write notification log for operation " + writeId + " table " + tbl.getCompleteName() + - "partition " + partitionSpec + " list of files " + 
newFiles); + LOG.debug( + "adding write notification log for operation {} table {} partition {} list of files", + writeId, tbl, partitionSpec, newFiles); try { FileSystem fileSystem = tbl.getDataLocation().getFileSystem(conf); @@ -2832,7 +2811,7 @@ private void addWriteNotificationLog(Table tbl, Map partitionSpe tbl.getDbName(), tbl.getTableName(), insertData); addInsertFileInformation(newFiles, fileSystem, insertData); - if (partitionSpec != null && !partitionSpec.isEmpty()) { + if (MapUtils.isNotEmpty(partitionSpec)) { for (FieldSchema fs : tbl.getPartitionKeys()) { rqst.addToPartitionVals(partitionSpec.get(fs.getName())); } @@ -2848,7 +2827,7 @@ private void fireInsertEvent(Table tbl, Map partitionSpec, boole if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML)) { LOG.debug("Firing dml insert event"); if (tbl.isTemporary()) { - LOG.debug("Not firing dml insert event as " + tbl.getTableName() + " is temporary"); + LOG.debug("Not firing dml insert event as {} is temporary", tbl); return; } try { @@ -2879,7 +2858,6 @@ private void fireInsertEvent(Table tbl, Map partitionSpec, boole } } - private static void addInsertFileInformation(List newFiles, FileSystem fileSystem, InsertEventRequestData insertData) throws IOException { LinkedList directories = null; @@ -2914,7 +2892,6 @@ private static void addInsertFileInformation(List newFiles, FileSystem fil } } - private static void addInsertNonDirectoryInformation(Path p, FileSystem fileSystem, InsertEventRequestData insertData) throws IOException { insertData.addToFilesAdded(p.toString()); @@ -3086,7 +3063,6 @@ public boolean dropPartition(String dbName, String tableName, List partV try { names = getMSC().listPartitionNames(dbName, tblName, max); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } return names; @@ -3102,7 +3078,6 @@ public boolean dropPartition(String dbName, String tableName, List partV try { names = getMSC().listPartitionNames(dbName, tblName, pvals, max); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } return names; @@ -3122,7 +3097,6 @@ public boolean dropPartition(String dbName, String tableName, List partV tParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), (short) -1, getUserName(), getGroupNames()); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } List parts = new ArrayList(tParts.size()); @@ -3132,9 +3106,7 @@ public boolean dropPartition(String dbName, String tableName, List partV return parts; } else { Partition part = new Partition(tbl); - ArrayList parts = new ArrayList(1); - parts.add(part); - return parts; + return Collections.singletonList(part); } } @@ -3152,7 +3124,6 @@ public boolean dropPartition(String dbName, String tableName, List partV try { tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short)-1); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } Set parts = new LinkedHashSet(tParts.size()); @@ -3192,7 +3163,7 @@ public boolean dropPartition(String dbName, String tableName, List partV List qlPartitions = new ArrayList(); for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) { - qlPartitions.add( new Partition(tbl, p)); + qlPartitions.add(new Partition(tbl, p)); } return qlPartitions; @@ -3266,10 +3237,10 @@ public boolean dropPartition(String dbName, String tableName, List partV try { for (int i = 0; i < nBatches; ++i) { List tParts = 
- getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), - partNames.subList(i*batchSize, (i+1)*batchSize)); + getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), + partNames.subList(i * batchSize, (i + 1) * batchSize)); if (tParts != null) { - for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) { + for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) { partitions.add(new Partition(tbl, tpart)); } } @@ -3277,10 +3248,10 @@ public boolean dropPartition(String dbName, String tableName, List partV if (nParts > nBatches * batchSize) { List tParts = - getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), - partNames.subList(nBatches*batchSize, nParts)); + getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), + partNames.subList(nBatches * batchSize, nParts)); if (tParts != null) { - for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) { + for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) { partitions.add(new Partition(tbl, tpart)); } } @@ -3316,7 +3287,7 @@ public boolean dropPartition(String dbName, String tableName, List partV private static List convertFromMetastore(Table tbl, List partitions) throws HiveException { if (partitions == null) { - return new ArrayList(); + return Collections.emptyList(); } List results = new ArrayList(partitions.size()); @@ -3375,7 +3346,6 @@ public void validatePartitionNameCharacters(List partVals) throws HiveEx try { getMSC().validatePartitionNameCharacters(partVals); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -3421,7 +3391,6 @@ public void dropRole(String roleName) throws HiveException { } } - public boolean grantRole(String roleName, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) throws HiveException { @@ -3522,7 +3491,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, if (!fullDestStatus.getFileStatus().isDirectory()) { throw new HiveException(destf + " is not a directory."); } - final List>> futures = new LinkedList<>(); + final List>> futures = new ArrayList<>(); final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25), new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()) : null; @@ -3602,7 +3571,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, for (Future> future : futures) { try { ObjectPair pair = future.get(); - LOG.debug("Moved src: {}, to dest: {}", pair.getFirst().toString(), pair.getSecond().toString()); + LOG.debug("Moved src: {}, to dest: {}", pair.getFirst(), pair.getSecond()); } catch (Exception e) { throw handlePoolException(pool, e); } @@ -3621,7 +3590,7 @@ private static boolean isSubDir(Path srcf, Path destf, FileSystem srcFs, FileSys boolean isInTest = HiveConf.getBoolVar(srcFs.getConf(), ConfVars.HIVE_IN_TEST); // In the automation, the data warehouse is the local file system based. - LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2); + LOG.debug("The source path is {} and the destination path is {}", fullF1, fullF2); if (isInTest) { return fullF1.startsWith(fullF2); } @@ -3638,12 +3607,13 @@ private static boolean isSubDir(Path srcf, Path destf, FileSystem srcFs, FileSys // If both schema information are provided, they should be the same. 
if (schemaSrcf != null && schemaDestf != null && !schemaSrcf.equals(schemaDestf)) { - LOG.debug("The source path's schema is " + schemaSrcf + - " and the destination path's schema is " + schemaDestf + "."); + LOG.debug( + "The source path's schema is {} and the destination path's schema is {}.", + schemaSrcf, schemaDestf); return false; } - LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2); + LOG.debug("The source path is {} and the destination path is {}", fullF1, fullF2); return fullF1.startsWith(fullF2); } @@ -3769,7 +3739,6 @@ public static void listNewFilesRecursively(final FileSystem destFs, Path dest, } } } catch (IOException e) { - LOG.error("Failed to get source file statuses", e); throw new HiveException(e.getMessage(), e); } } @@ -3800,13 +3769,11 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, try { destFs = destf.getFileSystem(conf); } catch (IOException e) { - LOG.error("Failed to get dest fs", e); throw new HiveException(e.getMessage(), e); } try { srcFs = srcf.getFileSystem(conf); } catch (IOException e) { - LOG.error("Failed to get src fs", e); throw new HiveException(e.getMessage(), e); } @@ -3833,7 +3800,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, // to delete the file first if (replace && !srcIsSubDirOfDest) { destFs.delete(destf, true); - LOG.debug("The path " + destf.toString() + " is deleted"); + LOG.debug("The path {} is deleted", destf); } } catch (FileNotFoundException ignore) { } @@ -3847,7 +3814,9 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, } else { if (needToCopy(srcf, destf, srcFs, destFs, configuredOwner, isManaged)) { //copy if across file system or encryption zones. - LOG.debug("Copying source " + srcf + " to " + destf + " because HDFS encryption zones are different."); + LOG.debug( + "Copying source {} to {} because HDFS encryption zones are different.", + srcf, destf); return FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf, true, // delete source replace, // overwrite destination @@ -4083,11 +4052,10 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem srcFs = srcf.getFileSystem(conf); srcs = srcFs.globStatus(srcf); } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException("addFiles: filesystem error in check phase. " + e.getMessage(), e); } if (srcs == null) { - LOG.info("No sources specified to move: " + srcf); + LOG.info("No sources specified to move: {}", srcf); return; // srcs = new FileStatus[0]; Why is this needed? } @@ -4122,12 +4090,12 @@ public static void moveAcidFiles(FileSystem fs, FileStatus[] stats, Path dst, for (FileStatus stat : stats) { Path srcPath = stat.getPath(); - LOG.debug("Acid move Looking for original buckets in " + srcPath); + LOG.debug("Acid move Looking for original buckets in {}", srcPath); FileStatus[] origBucketStats = null; try { origBucketStats = fs.listStatus(srcPath, AcidUtils.originalBucketFilter); - if(origBucketStats == null || origBucketStats.length == 0) { + if (ArrayUtils.isEmpty(origBucketStats)) { /** check if we are dealing with data with non-standard layout. 
For example a write produced by a (optimized) Union All query @@ -4151,18 +4119,18 @@ produced by a (optimized) Union All query FileStatus[] unionSubdirs = fs.globStatus(new Path(srcPath, AbstractFileMergeOperator.UNION_SUDBIR_PREFIX + "[0-9]*")); List buckets = new ArrayList<>(); - for(FileStatus unionSubdir : unionSubdirs) { + for (FileStatus unionSubdir : unionSubdirs) { Collections.addAll(buckets, fs.listStatus(unionSubdir.getPath(), AcidUtils.originalBucketFilter)); } origBucketStats = buckets.toArray(new FileStatus[buckets.size()]); } } catch (IOException e) { - String msg = "Unable to look for bucket files in src path " + srcPath.toUri().toString(); - LOG.error(msg); - throw new HiveException(msg, e); + throw new HiveException( + "Unable to look for bucket files in src path " + srcPath.toUri(), + e); } - LOG.debug("Acid move found " + origBucketStats.length + " original buckets"); + LOG.debug("Acid move found {} original buckets", origBucketStats.length); for (FileStatus origBucketStat : origBucketStats) { Path origBucketPath = origBucketStat.getPath(); @@ -4179,16 +4147,17 @@ produced by a (optimized) Union All query private static void moveAcidFiles(String deltaFileType, PathFilter pathFilter, FileSystem fs, Path dst, Path origBucketPath, Set createdDeltaDirs, List newFiles) throws HiveException { - LOG.debug("Acid move looking for " + deltaFileType + " files in bucket " + origBucketPath); + LOG.debug("Acid move looking for {} files in bucket {}", deltaFileType, + origBucketPath); FileStatus[] deltaStats = null; try { deltaStats = fs.listStatus(origBucketPath, pathFilter); } catch (IOException e) { - throw new HiveException("Unable to look for " + deltaFileType + " files in original bucket " + - origBucketPath.toUri().toString(), e); + throw new HiveException("Unable to look for " + deltaFileType + + " files in original bucket " + origBucketPath.toUri(), e); } - LOG.debug("Acid move found " + deltaStats.length + " " + deltaFileType + " files"); + LOG.debug("Acid move found {} {} files", deltaStats.length, deltaFileType); for (FileStatus deltaStat : deltaStats) { Path deltaPath = deltaStat.getPath(); @@ -4212,14 +4181,14 @@ private static void moveAcidFiles(String deltaFileType, PathFilter pathFilter, F } } FileStatus[] bucketStats = fs.listStatus(deltaPath, AcidUtils.bucketFileFilter); - LOG.debug("Acid move found " + bucketStats.length + " bucket files"); + LOG.debug("Acid move found {} bucket files", bucketStats.length ); for (FileStatus bucketStat : bucketStats) { Path bucketSrc = bucketStat.getPath(); Path bucketDest = new Path(deltaDest, bucketSrc.getName()); final String msg = "Unable to move source " + bucketSrc + " to destination " + bucketDest; - LOG.info("Moving bucket " + bucketSrc.toUri().toString() + " to " + - bucketDest.toUri().toString()); + LOG.info("Moving bucket {} to {}", bucketSrc.toUri(), + bucketDest.toUri()); try { fs.rename(bucketSrc, bucketDest); if (newFiles != null) { @@ -4273,10 +4242,10 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, srcFs = srcf.getFileSystem(conf); srcs = srcFs.globStatus(srcf); } catch (IOException e) { - throw new HiveException("Getting globStatus " + srcf.toString(), e); + throw new HiveException("Getting globStatus " + srcf, e); } if (srcs == null) { - LOG.info("No sources specified to move: " + srcf); + LOG.info("No sources specified to move: {}", srcf); return; } @@ -4287,9 +4256,9 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, // first call 
FileUtils.mkdir to make sure that destf directory exists, if not, it creates // destf boolean destfExist = FileUtils.mkdir(destFs, destf, conf); - if(!destfExist) { - throw new IOException("Directory " + destf.toString() - + " does not exist and could not be created."); + if (!destfExist) { + throw new IOException( + "Directory " + destf + " does not exist and could not be created."); } // Two cases: @@ -4328,8 +4297,9 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, boolean purge, PathFilter pathFilter, boolean isNeedRecycle) throws HiveException { - Utilities.FILE_OP_LOGGER.debug("Deleting old paths for replace in " + destPath - + " and old path " + oldPath); + Utilities.FILE_OP_LOGGER.debug( + "Deleting old paths for replace in {} and old path {}", destPath, + oldPath); boolean isOldPathUnderDestf = false; try { FileSystem oldFs = oldPath.getFileSystem(conf); @@ -4345,11 +4315,11 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, } catch (IOException e) { if (isOldPathUnderDestf) { // if oldPath is a subdir of destf but it could not be cleaned - throw new HiveException("Directory " + oldPath.toString() - + " could not be cleaned up.", e); + throw new HiveException( + "Directory " + oldPath + " could not be cleaned up.", e); } else { //swallow the exception since it won't affect the final result - LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e); + LOG.warn("Directory {} cannot be cleaned", oldPath, e); } } } @@ -4361,7 +4331,7 @@ private void cleanUpOneDirectoryForReplace(Path path, FileSystem fs, recycleDirToCmPath(path, purge); } FileStatus[] statuses = fs.listStatus(path, pathFilter); - if (statuses == null || statuses.length == 0) { + if (ArrayUtils.isEmpty(statuses)) { return; } if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { @@ -4391,7 +4361,7 @@ public static boolean trashFiles(final FileSystem fs, final FileStatus[] statuse throws IOException { boolean result = true; - if (statuses == null || statuses.length == 0) { + if (ArrayUtils.isEmpty(statuses)) { return false; } final List> futures = new LinkedList<>(); @@ -4418,7 +4388,6 @@ public Boolean call() throws Exception { try { result &= future.get(); } catch (InterruptedException | ExecutionException e) { - LOG.error("Failed to delete: ",e); pool.shutdownNow(); throw new IOException(e); } @@ -4441,7 +4410,6 @@ public static boolean isHadoop1() { return convertFromMetastore(getTable(destDb, destinationTableName), partitions); } catch (Exception ex) { - LOG.error(StringUtils.stringifyException(ex)); throw new HiveException(ex); } } @@ -4484,9 +4452,8 @@ private HiveStorageHandler createStorageHandler(org.apache.hadoop.hive.metastore HiveUtils.getStorageHandler(conf, tbl.getParameters().get(META_TABLE_STORAGE)); return storageHandler; } catch (HiveException ex) { - LOG.error(StringUtils.stringifyException(ex)); throw new MetaException( - "Failed to load storage handler: " + ex.getMessage()); + "Failed to load storage handler." + ex.getMessage()); } } @@ -4533,7 +4500,6 @@ public synchronized IMetaStoreClient getMSC( owner = UserGroupInformation.getCurrentUser(); } catch(IOException e) { String msg = "Error getting current user: " + e.getMessage(); - LOG.error(msg, e); throw new MetaException(msg + "\n" + StringUtils.stringifyException(e)); } try { @@ -4600,7 +4566,7 @@ public boolean setPartitionColumnStatistics( request.setWriteId(tableSnapshot != null ? 
tableSnapshot.getWriteId() : 0); return getMSC().setPartitionColumnStatistics(request); } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -4621,7 +4587,7 @@ public boolean setPartitionColumnStatistics( } return retv; } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -4641,7 +4607,7 @@ public boolean setPartitionColumnStatistics( return getMSC().getPartitionColumnStatistics( dbName, tableName, partNames, colNames, writeIdList); } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -4657,30 +4623,31 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, } return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, writeIdList); } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); - return new AggrStats(new ArrayList(),0); + LOG.debug("Error", e); + return new AggrStats(Collections.emptyList(), 0); } } - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) - throws HiveException { + public boolean deleteTableColumnStatistics(String dbName, String tableName, + String colName) throws HiveException { try { return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); - } catch(Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + } catch (Exception e) { + LOG.debug("Error", e); throw new HiveException(e); } } - public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - String colName) throws HiveException { - try { - return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); - } catch(Exception e) { - LOG.debug(StringUtils.stringifyException(e)); - throw new HiveException(e); - } + public boolean deletePartitionColumnStatistics(String dbName, + String tableName, String partName, String colName) throws HiveException { + try { + return getMSC().deletePartitionColumnStatistics(dbName, tableName, + partName, colName); + } catch (Exception e) { + LOG.debug("Error", e); + throw new HiveException(e); } + } public Table newTable(String tableName) throws HiveException { String[] names = Utilities.getDbTableName(tableName); @@ -4691,8 +4658,7 @@ public String getDelegationToken(String owner, String renewer) throws HiveException{ try { return getMSC().getDelegationToken(owner, renewer); - } catch(Exception e) { - LOG.error(StringUtils.stringifyException(e)); + } catch (Exception e) { throw new HiveException(e); } } @@ -4702,7 +4668,6 @@ public void cancelDelegationToken(String tokenStrForm) try { getMSC().cancelDelegationToken(tokenStrForm); } catch(Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -4727,29 +4692,36 @@ public void compact(String dbname, String tableName, String partName, String com * @return id of new request or id already existing request for specified resource * @throws HiveException */ - public CompactionResponse compact2(String dbname, String tableName, String partName, String compactType, - Map tblproperties) + public CompactionResponse compact2(String dbname, String tableName, + String partName, String compactType, Map tblproperties) throws HiveException { + + CompactionType cr = null; + String compactStr = org.apache.commons.lang.StringUtils + .defaultString(compactType).toLowerCase(); + + switch (compactStr) { + case "major": 
+ cr = CompactionType.MAJOR; + break; + case "minor": + cr = CompactionType.MINOR; + break; + default: + throw new HiveException("Unknown compaction type " + compactType); + } + try { - CompactionType cr = null; - if ("major".equalsIgnoreCase(compactType)) { - cr = CompactionType.MAJOR; - } else if ("minor".equalsIgnoreCase(compactType)) { - cr = CompactionType.MINOR; - } else { - throw new RuntimeException("Unknown compaction type " + compactType); - } return getMSC().compact2(dbname, tableName, partName, cr, tblproperties); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } + public ShowCompactResponse showCompactions() throws HiveException { try { return getMSC().showCompactions(); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -4758,7 +4730,6 @@ public GetOpenTxnsInfoResponse showTransactions() throws HiveException { try { return getMSC().showTxns(); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -4767,7 +4738,6 @@ public void abortTransactions(List txnids) throws HiveException { try { getMSC().abortTxns(txnids); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } } @@ -4847,7 +4817,8 @@ public void clearMetaCallTiming() { boolean phaseInfoLogged = false; if (LOG.isDebugEnabled()) { phaseInfoLogged = logDumpPhase(phase); - LOG.debug("Total time spent in each metastore function (ms): " + metaCallTimeMap); + LOG.debug("Total time spent in each metastore function (ms): {}", + metaCallTimeMap); } if (LOG.isInfoEnabled()) { @@ -4858,8 +4829,9 @@ public void clearMetaCallTiming() { if (!phaseInfoLogged) { phaseInfoLogged = logDumpPhase(phase); } - LOG.info("Total time spent in this metastore function was greater than 1000ms : " - + callTime); + LOG.info( + "Total time spent in this metastore function was greater than 1000ms : {}", + callTime); } } } @@ -4870,7 +4842,8 @@ public void clearMetaCallTiming() { } private boolean logDumpPhase(String phase) { - LOG.info("Dumping metastore api call timing information for : " + phase + " phase"); + LOG.info("Dumping metastore api call timing information for : {} phase", + phase); return true; }
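
Note (not part of the patch): the recurring change above is a move from concatenated, guard-wrapped log statements to SLF4J parameterized logging, together with null-safe emptiness checks from commons-collections/commons-lang and the removal of redundant LOG.error calls immediately before a wrapped rethrow. Below is a minimal sketch of that style, under the assumption that slf4j-api and commons-collections are on the classpath; the class, method, and helper names are made up for illustration and do not appear in Hive.java.

    import java.io.IOException;
    import java.util.List;
    import org.apache.commons.collections.CollectionUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative sketch only -- class, method, and helper names are hypothetical
    // and are not part of Hive.java or of this patch.
    public final class LoggingStyleSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingStyleSketch.class);

      public void move(String src, String dst, List<String> newFiles) {
        // Null-safe emptiness check, in the spirit of the CollectionUtils / MapUtils /
        // ArrayUtils helpers the patch substitutes for hand-written null/size checks.
        if (CollectionUtils.isEmpty(newFiles)) {
          LOG.debug("nothing to move to {}", dst);
          return;
        }
        // {} placeholders defer argument formatting until DEBUG is actually enabled,
        // so an explicit isDebugEnabled()/isTraceEnabled() guard is only worthwhile
        // when computing an argument is itself expensive.
        LOG.debug("moving {} to {} ({} files)", src, dst, newFiles.size());
        try {
          doMove(src, dst, newFiles);
        } catch (IOException e) {
          // Wrap and rethrow without logging first; logging here and again at the
          // eventual handler would record the same stack trace twice.
          throw new IllegalStateException("move failed: " + src, e);
        }
      }

      private void doMove(String src, String dst, List<String> newFiles) throws IOException {
        // placeholder body for the sketch
      }
    }

Because the placeholders defer formatting, the isTraceEnabled()/isDebugEnabled() guards removed by the patch remain useful only where building a log argument is itself costly (for example, computing a partition spec), which is why the patch drops the guards but keeps the same messages and arguments.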