diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java index 8dcce30928..75f5118b16 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloDefaultIndexScanner.java @@ -113,12 +113,12 @@ public void init(Configuration conf) { if (col != null) { try { - LOG.debug("Searching tab=" + indexTable + " column=" + column + " range=" + indexRange); + LOG.debug("Searching tab={} column={} range={}", indexTable, column, indexRange); Connector conn = getConnector(); scan = conn.createScanner(indexTable, auths); scan.setRange(indexRange); Text cf = new Text(col); - LOG.debug("Using Column Family=" + toString()); + LOG.debug("Using Column Family={}", this); scan.fetchColumnFamily(cf); for (Map.Entry entry : scan) { @@ -132,11 +132,7 @@ public void init(Configuration conf) { } // no hits on the index so return a no match range - if (rowIds.isEmpty()) { - LOG.debug("Found 0 index matches"); - } else { - LOG.debug("Found " + rowIds.size() + " index matches"); - } + LOG.debug("Found {} index matches", rowIds.size()); return rowIds; } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException e) { @@ -149,7 +145,7 @@ public void init(Configuration conf) { } // assume the index is bad and do a full scan - LOG.debug("Index lookup failed for table " + indexTable); + LOG.debug("Index lookup failed for table {}", indexTable); return null; } diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java index d2aecc17e5..ca60c69ca8 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java @@ -236,7 +236,7 @@ public void configureInputJobProperties(TableDesc tableDesc, Map // Probably don't have a JobConf here, but we can still try... 
if (conf instanceof JobConf) { // Convert the Accumulo token in a Hadoop token - LOG.debug("Adding Hadoop Token for Accumulo to Job's Credentials: " + accumuloToken); + LOG.debug("Adding Hadoop Token for Accumulo to Job's Credentials: {}", accumuloToken); // Add the Hadoop token to the JobConf JobConf jobConf = (JobConf) conf; @@ -476,7 +476,7 @@ public void configureJobConf(TableDesc tableDesc, JobConf jobConf) { // Add the Hadoop token to the JobConf helper.mergeTokenIntoJobConf(jobConf, accumuloToken); - LOG.debug("All job tokens: " + jobConf.getCredentials().getAllTokens()); + LOG.debug("All job tokens: {}", jobConf.getCredentials().getAllTokens()); } catch (Exception e) { throw new RuntimeException("Failed to obtain DelegationToken for " + connectionParams.getAccumuloUserName(), e); diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java index 32a4f305c2..7806f7ad1a 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java @@ -235,7 +235,7 @@ public void setInputFormatConnectorInfo(JobConf conf, String username, Authentic AccumuloInputFormat.setConnectorInfo(conf, username, token); } catch (IllegalStateException e) { // AccumuloInputFormat complains if you re-set an already set value. We just don't care. - log.debug("Ignoring exception setting Accumulo Connector instance for user " + username, e); + log.debug("Ignoring exception setting Accumulo Connector instance for user {}", username, e); } } @@ -249,7 +249,7 @@ public void setOutputFormatConnectorInfo(JobConf conf, String username, Authenti AccumuloOutputFormat.setConnectorInfo(conf, username, token); } catch (IllegalStateException e) { // AccumuloOutputFormat complains if you re-set an already set value. We just don't care. - log.debug("Ignoring exception setting Accumulo Connector instance for user " + username, e); + log.debug("Ignoring exception setting Accumulo Connector instance for user {}", username, e); } } @@ -264,8 +264,7 @@ public void setInputFormatZooKeeperInstance(JobConf conf, String instanceName, S AccumuloInputFormat.setZooKeeperInstance(conf, clientConf); } catch (IllegalStateException ise) { // AccumuloInputFormat complains if you re-set an already set value. We just don't care. - log.debug("Ignoring exception setting ZooKeeper instance of " + instanceName + " at " - + zookeepers, ise); + log.debug("Ignoring exception setting ZooKeeper instance of {} at {}", instanceName, zookeepers, ise); } } @@ -280,8 +279,7 @@ public void setOutputFormatZooKeeperInstance(JobConf conf, String instanceName, AccumuloOutputFormat.setZooKeeperInstance(conf, clientConf); } catch (IllegalStateException ise) { // AccumuloOutputFormat complains if you re-set an already set value. We just don't care. - log.debug("Ignoring exception setting ZooKeeper instance of " + instanceName + " at " - + zookeepers, ise); + log.debug("Ignoring exception setting ZooKeeper instance of {} at {}", instanceName, zookeepers, ise); } } @@ -294,7 +292,7 @@ public void setInputFormatMockInstance(JobConf conf, String instanceName) { AccumuloInputFormat.setMockInstance(conf, instanceName); } catch (IllegalStateException e) { // AccumuloInputFormat complains if you re-set an already set value. We just don't care. 
- log.debug("Ignoring exception setting mock instance of " + instanceName, e); + log.debug("Ignoring exception setting mock instance of {}", instanceName, e); } } @@ -307,7 +305,7 @@ public void setOutputFormatMockInstance(JobConf conf, String instanceName) { AccumuloOutputFormat.setMockInstance(conf, instanceName); } catch (IllegalStateException e) { // AccumuloOutputFormat complains if you re-set an already set value. We just don't care. - log.debug("Ignoring exception setting mock instance of " + instanceName, e); + log.debug("Ignoring exception setting mock instance of {}", instanceName, e); } } diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java index cef8fde594..83d23dc89a 100644 --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -702,15 +702,15 @@ public static boolean distCp(FileSystem srcFS, List srcPaths, Path dst, */ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge) throws IOException { - LOG.debug("deleting " + f); + LOG.debug("Deleting {}", f); boolean result = false; try { if(purge) { - LOG.debug("purge is set to true. Not moving to Trash " + f); + LOG.debug("purge is set to true. Not moving to Trash {}", f); } else { result = Trash.moveToAppropriateTrash(fs, f, conf); if (result) { - LOG.trace("Moved to trash: " + f); + LOG.trace("Moved to trash: {}", f); return true; } } diff --git a/common/src/java/org/apache/hadoop/hive/common/auth/HiveAuthUtils.java b/common/src/java/org/apache/hadoop/hive/common/auth/HiveAuthUtils.java index c976285f47..6f38f21aa0 100644 --- a/common/src/java/org/apache/hadoop/hive/common/auth/HiveAuthUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/auth/HiveAuthUtils.java @@ -111,7 +111,7 @@ public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, Str List enabledProtocols = new ArrayList(); for (String protocol : sslServerSocket.getEnabledProtocols()) { if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) { - LOG.debug("Disabling SSL Protocol: " + protocol); + LOG.debug("Disabling SSL Protocol: {}", protocol); } else { enabledProtocols.add(protocol); } diff --git a/common/src/java/org/apache/hadoop/hive/common/type/TimestampTZUtil.java b/common/src/java/org/apache/hadoop/hive/common/type/TimestampTZUtil.java index 4708d35a78..06b11af3da 100644 --- a/common/src/java/org/apache/hadoop/hive/common/type/TimestampTZUtil.java +++ b/common/src/java/org/apache/hadoop/hive/common/type/TimestampTZUtil.java @@ -115,9 +115,7 @@ public static TimestampTZ parseOrNull(String s, ZoneId defaultTimeZone) { try { return parse(s, defaultTimeZone); } catch (DateTimeParseException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Invalid string " + s + " for TIMESTAMP WITH TIME ZONE", e); - } + LOG.debug("Invalid string {} for TIMESTAMP WITH TIME ZONE", s, e); return null; } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 0da5e8bb01..e82aeae028 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -5737,7 +5737,7 @@ private void initialize(Class cls) { if (msUri == null || msUri.isEmpty()) { msUri = this.get("metastore.thrift.uris"); } - LOG.debug("Found metastore URI of " + msUri); + LOG.debug("Found metastore URI of {}", msUri); 
if(HiveConfUtil.isEmbeddedMetaStore(msUri)){ setLoadMetastoreConfig(true); } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java index a28580cba1..6aa532f8d9 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java @@ -191,8 +191,8 @@ public static void updateJobCredentialProviders(Configuration jobConf) { if (StringUtils.isNotBlank(jobKeyStoreLocation)) { jobConf.set(Constants.HADOOP_CREDENTIAL_PROVIDER_PATH_CONFIG, jobKeyStoreLocation); - LOG.debug("Setting job conf credstore location to " + jobKeyStoreLocation - + " previous location was " + oldKeyStoreLocation); + LOG.debug("Setting job conf credstore location to {} previous location was {}", jobKeyStoreLocation, + oldKeyStoreLocation); } String credstorePassword = getJobCredentialProviderPassword(jobConf); diff --git a/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java b/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java index 973b9acae2..f9b44950ae 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java +++ b/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java @@ -60,7 +60,7 @@ public String substitute(HiveConf conf, String expr) { return expr; } if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTE)) { - l4j.debug("Substitution is on: " + expr); + l4j.debug("Substitution is on: {}", expr); } else { return expr; } diff --git a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java index f1181fd268..622508b1d7 100644 --- a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java +++ b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java @@ -136,9 +136,7 @@ public static void setPerfLogger(PerfLogger resetPerfLogger) { public void PerfLogBegin(String callerName, String method) { long startTime = System.currentTimeMillis(); startTimes.put(method, Long.valueOf(startTime)); - if (LOG.isDebugEnabled()) { - LOG.debug("<PERFLOG method=" + method + " from=" + callerName + ">"); - } + LOG.debug("<PERFLOG method={} from={}>", method, callerName); beginMetrics(method); } /** diff --git a/common/src/java/org/apache/hive/common/util/HiveVersionInfo.java b/common/src/java/org/apache/hive/common/util/HiveVersionInfo.java index 9f033e77f0..575820e0f4 100644 --- a/common/src/java/org/apache/hive/common/util/HiveVersionInfo.java +++ b/common/src/java/org/apache/hive/common/util/HiveVersionInfo.java @@ -124,7 +124,7 @@ public static String getBuildVersion(){ } public static void main(String[] args) { - LOG.debug("version: "+ version); + LOG.debug("Version: {}", version); System.out.println("Hive " + getVersion()); System.out.println("Git " + getUrl() + " -r " + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); diff --git a/common/src/java/org/apache/hive/http/JMXJsonServlet.java b/common/src/java/org/apache/hive/http/JMXJsonServlet.java index 7b2f89e3e1..1e05957b9e 100644 --- a/common/src/java/org/apache/hive/http/JMXJsonServlet.java +++ b/common/src/java/org/apache/hive/http/JMXJsonServlet.java @@ -213,7 +213,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) { private void listBeans(JsonGenerator jg, ObjectName qry, String attribute, HttpServletResponse response) throws IOException { - LOG.debug("Listing beans for "+qry); + LOG.debug("Listing beans for {}", qry); Set names = null; names = mBeanServer.queryNames(qry,
null); @@ -324,7 +324,7 @@ private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeIn // UnsupportedOperationExceptions happen in the normal course of business, // so no need to log them as errors all the time. if (e.getCause() instanceof UnsupportedOperationException) { - LOG.debug("getting attribute "+attName+" of "+oname+" is unsupported"); + LOG.debug("getting attribute {} of {} is unsupported", attName, oname); } else { LOG.error("getting attribute "+attName+" of "+oname+" threw an exception", e); } @@ -332,7 +332,7 @@ private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeIn } catch (RuntimeErrorException e) { // RuntimeErrorException happens when an unexpected failure occurs in getAttribute // for example https://issues.apache.org/jira/browse/DAEMON-120 - LOG.debug("getting attribute "+attName+" of "+oname+" threw an exception", e); + LOG.debug("getting attribute {} of {} threw an exception", attName, oname, e); return; } catch (AttributeNotFoundException e) { //Ignored the attribute was not found, which should never happen because the bean diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java index c887394b97..93e3edc5eb 100644 --- a/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java +++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapCacheAwareFs.java @@ -74,9 +74,7 @@ public static Path registerFile(DataCache cache, Path path, Object fileKey, } public static void unregisterFile(Path cachePath) { - if (LOG.isDebugEnabled()) { - LOG.debug("Unregistering " + cachePath); - } + LOG.debug("Unregistering {}", cachePath); files.remove(extractSplitId(cachePath)); } diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java index 9ad148681b..567bd0d087 100644 --- a/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapHiveUtils.java @@ -81,9 +81,8 @@ public static CacheTag getDbAndTableNameForMetrics(Path path, boolean includePar */ public static MapWork findMapWork(JobConf job) throws HiveException { String inputName = job.get(Utilities.INPUT_NAME, null); - if (LOG.isDebugEnabled()) { - LOG.debug("Initializing for input " + inputName); - } + LOG.debug("Initializing for input {}", inputName); + String prefixes = job.get(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES); if (prefixes != null && !StringUtils.isBlank(prefixes)) { // Currently SMB is broken, so we cannot check if it's compatible with IO elevator. 
diff --git a/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java b/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java index c71c637c71..b77bad0d23 100644 --- a/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java +++ b/ql/src/java/org/apache/hadoop/hive/llap/LlapOutputFormatService.java @@ -195,7 +195,7 @@ private void registerReader(ChannelHandlerContext ctx, String id, byte[] tokenBy return; } } - LOG.debug("registering socket for: " + id); + LOG.debug("registering socket for: {}", id); int maxPendingWrites = HiveConf.getIntVar(conf, HiveConf.ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES); boolean useArrow = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java index f7de0c605b..22bf76e6a6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java @@ -147,7 +147,7 @@ public static void addTablePartsOutputs(Hive db, Set outputs, Table parts.add(p); } } catch (HiveException e) { - LOG.debug("Wrong specification" + StringUtils.stringifyException(e)); + LOG.debug("Wrong specification", e); throw new SemanticException(e.getMessage(), e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java index 4fb53785c2..a88700c8fd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java @@ -68,7 +68,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException { mvRebuildDbName = tableName.getDb(); mvRebuildName = tableName.getTable(); - LOG.debug("Rebuilding materialized view " + tableName.getNotEmptyDbTable()); + LOG.debug("Rebuilding materialized view {}", tableName.getNotEmptyDbTable()); super.analyzeInternal(rewrittenAST); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java index ff0c9e253d..de081e9953 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java @@ -103,9 +103,7 @@ public void process(Object row, int tag) throws HiveException { throw new HiveException(e); } - if (LOG.isDebugEnabled()) { - LOG.debug("AppMasterEvent: " + row); - } + LOG.debug("AppMasterEvent: {}", row); forward(row, rowInspector); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index dc6d31a9cb..6c72bfa42b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -79,9 +79,8 @@ private ColumnStatistics constructColumnStatsFromInput() // If we are replicating the stats, we don't need to construct those again. 
if (work.getColStats() != null) { ColumnStatistics colStats = work.getColStats(); - LOG.debug("Got stats through replication for " + - colStats.getStatsDesc().getDbName() + "." + - colStats.getStatsDesc().getTableName()); + LOG.debug("Got stats through replication for `{}`.`{}`", colStats.getStatsDesc().getDbName(), + colStats.getStatsDesc().getTableName()); return colStats; } String dbName = work.dbName(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index bc42df121f..918aa45d79 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -154,7 +154,7 @@ public FetchOperator(FetchWork work, JobConf job, Operator operator, public void setValidWriteIdList(String writeIdStr) { job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIdStr); - LOG.debug("FetchOperator set writeIdStr: " + writeIdStr); + LOG.debug("FetchOperator set writeIdStr: {}", writeIdStr); } private void initialize() throws HiveException { ensureCorrectSchemaEvolutionConfigs(job); @@ -408,7 +408,7 @@ public boolean doNext(WritableComparable key, Writable value) throws IOException List dirs = new ArrayList<>(), dirsWithOriginals = new ArrayList<>(); processCurrPathForMmWriteIds(inputFormat, dirs, dirsWithOriginals); if (dirs.isEmpty() && dirsWithOriginals.isEmpty()) { - LOG.debug("No valid directories for " + currPath); + LOG.debug("No valid directories for {}", currPath); continue; } @@ -444,7 +444,7 @@ public boolean doNext(WritableComparable key, Writable value) throws IOException } if (inputSplits.isEmpty()) { - LOG.debug("No splits for " + currPath); + LOG.debug("No splits for {}", currPath); continue; } if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_IN_TEST)) { @@ -495,7 +495,7 @@ private String makeInputString(List dirs) { private ValidWriteIdList extractValidWriteIdList() { if (currDesc.getTableName() == null || !org.apache.commons.lang3.StringUtils.isBlank(currDesc.getTableName())) { String txnString = job.get(ValidWriteIdList.VALID_WRITEIDS_KEY); - LOG.debug("FetchOperator get writeIdStr: " + txnString); + LOG.debug("FetchOperator get writeIdStr: {}", txnString); return txnString == null ? 
new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString); } return null; // not fetching from a table directly but from a temp location diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index d121a21f62..07f8ae3998 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -299,7 +299,7 @@ public void abortWritersAndUpdaters(FileSystem fs, boolean abort, boolean delete for (int idx = 0; idx < outWriters.length; idx++) { if (outWriters[idx] != null) { try { - LOG.debug("Aborted: closing: " + outWriters[idx].toString()); + LOG.debug("Aborted: closing: {}", outWriters[idx]); outWriters[idx].close(abort); if (delete) { fs.delete(outPaths[idx], true); @@ -313,7 +313,7 @@ public void abortWritersAndUpdaters(FileSystem fs, boolean abort, boolean delete for (int idx = 0; idx < updaters.length; idx++) { if (updaters[idx] != null) { try { - LOG.debug("Aborted: closing: " + updaters[idx].toString()); + LOG.debug("Aborted: closing: {}", updaters[idx]); updaters[idx].close(abort); if (delete) { fs.delete(outPaths[idx], true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index b0c586201b..e27735595c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -1075,9 +1075,8 @@ public static GenericUDAFEvaluator getGenericWindowingEvaluator(String name, public static GenericUDAFResolver getGenericUDAFResolver(String functionName) throws SemanticException { - if (LOG.isDebugEnabled()) { - LOG.debug("Looking up GenericUDAF: " + functionName); - } + LOG.debug("Looking up GenericUDAF: {}", functionName); + FunctionInfo finfo = getFunctionInfo(functionName); if (finfo == null) { return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java index c16aad81c0..d273c1cca7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java @@ -435,10 +435,7 @@ public void setChildren(Configuration hconf) throws Exception { for (String alias : aliases) { Operator op = conf.getAliasToWork().get(alias); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding alias " + alias + " to work list for file " - + onefile); - } + LOG.debug("Adding alias {} to work list for file {}", alias, onefile); Map, MapOpCtx> contexts = opCtxMap.computeIfAbsent(onefile, k -> new LinkedHashMap<>()); if (contexts.containsKey(op)) { @@ -522,9 +519,7 @@ public void cleanUpInputFileChangedOp() throws HiveException { } builder.append(context.alias); } - if (LOG.isDebugEnabled()) { - LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath); - } + LOG.debug("Processing alias(es) {} for file {}", builder, fpath); } // Add alias, table name, and partitions to hadoop conf so that their // children will inherit these diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 51de87f2fd..3e3fb7b05a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -319,9 +319,9 @@ public int execute() { Path targetPath = lfd.getTargetDir(); Path 
sourcePath = lfd.getSourcePath(); if (targetPath.equals(sourcePath)) { - Utilities.FILE_OP_LOGGER.debug("MoveTask not moving " + sourcePath); + Utilities.FILE_OP_LOGGER.debug("MoveTask not moving {}", sourcePath); } else { - Utilities.FILE_OP_LOGGER.debug("MoveTask moving " + sourcePath + " to " + targetPath); + Utilities.FILE_OP_LOGGER.debug("MoveTask moving {} to {}", sourcePath, targetPath); if(lfd.getWriteType() == AcidUtils.Operation.INSERT) { //'targetPath' is table root of un-partitioned table or partition //'sourcePath' result of 'select ...' part of CTAS statement @@ -332,7 +332,7 @@ public int execute() { List newFiles = new ArrayList<>(); Hive.moveAcidFiles(srcFs, srcs, targetPath, newFiles); } else { - LOG.debug("No files found to move from " + sourcePath + " to " + targetPath); + LOG.debug("No files found to move from {} to {}", sourcePath, targetPath); } } else { @@ -360,7 +360,7 @@ public int execute() { if (!destFs.exists(destPath.getParent())) { destFs.mkdirs(destPath.getParent()); } - Utilities.FILE_OP_LOGGER.debug("MoveTask moving (multi-file) " + srcPath + " to " + destPath); + Utilities.FILE_OP_LOGGER.debug("MoveTask moving (multi-file) {} to {}", srcPath, destPath); moveFile(srcPath, destPath, isDfsDir); } else { if (!destFs.exists(destPath)) { @@ -372,11 +372,11 @@ public int execute() { for (FileStatus child : children) { Path childSrc = child.getPath(); Path childDest = new Path(destPath, filePrefix + childSrc.getName()); - Utilities.FILE_OP_LOGGER.debug("MoveTask moving (multi-file) " + childSrc + " to " + childDest); + Utilities.FILE_OP_LOGGER.debug("MoveTask moving (multi-file) {} to {}", childSrc, childDest); moveFile(childSrc, childDest, isDfsDir); } } else { - Utilities.FILE_OP_LOGGER.debug("MoveTask skipping empty directory (multi-file) " + srcPath); + Utilities.FILE_OP_LOGGER.debug("MoveTask skipping empty directory (multi-file) {}", srcPath); } if (!srcFs.delete(srcPath, false)) { throw new IOException("Couldn't delete " + srcPath + " after moving all the files"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java index 51c3b6ff61..8f5d519506 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java @@ -96,7 +96,7 @@ private void updateSrcFileListForDupCopy(FileSystem dstFs, Path toPath, List fetchOpJobConfMap) throws HiveException { - for (Map.Entry> entry : work.getAliasToWork().entrySet()) { - LOG.debug("initializeOperators: " + entry.getKey() + ", children = " + entry.getValue().getChildOperators()); + if (LOG.isDebugEnabled()) { + for (Map.Entry> entry : work.getAliasToWork().entrySet()) { + LOG.debug("initializeOperators: {}, children = {}", entry.getKey(), entry.getValue().getChildOperators()); + } } // this mapper operator is used to initialize all the operators for (Map.Entry entry : work.getAliasToFetchWork().entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java index b194c5f4e5..5bb96e3f77 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java @@ -40,9 +40,7 @@ @Override public void release(String key) { // nothing to do - if (LOG.isDebugEnabled()) { - LOG.debug(key + " no longer needed"); - } + LOG.debug("{} no longer needed", key); } @Override @@ -53,9 +51,7 @@ public void release(String 
key) { @Override public T retrieve(String key, Callable fn) throws HiveException { try { - if (LOG.isDebugEnabled()) { - LOG.debug("Creating " + key); - } + LOG.debug("Creating {}", key); return fn.call(); } catch (Exception e) { throw new HiveException(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/Throttle.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/Throttle.java index 1fb2332400..6f9e315838 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/Throttle.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/Throttle.java @@ -62,7 +62,7 @@ public static void checkJobTracker(JobConf conf, Logger LOG) { while (true) { // read in the first 1K characters from the URL URL url = new URL(tracker); - LOG.debug("Throttle: URL " + tracker); + LOG.debug("Throttle: URL {}", tracker); InputStream in = null; try { in = url.openStream(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java index 999fe3d9b5..a90b4c9fec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java @@ -653,7 +653,7 @@ private int findKeySlotToWrite(long keyOffset, int keyLength, int hashCode) { } if (largestNumberOfSteps < i) { if (LOG.isDebugEnabled()) { - LOG.debug("Probed " + i + " slots (the longest so far) to find space"); + LOG.debug("Probed {} slots (the longest so far) to find space", i); } largestNumberOfSteps = i; // debugDumpKeyProbe(keyOffset, keyLength, hashCode, slot); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/FlatRowContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/FlatRowContainer.java index bb0baca0c9..f1bf50ef23 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/FlatRowContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/FlatRowContainer.java @@ -94,7 +94,7 @@ public void add(MapJoinObjectSerDeContext context, @Override public void addRow(List t) throws HiveException { - LOG.debug("Add is called with " + t.size() + " objects"); + LOG.debug("Add is called with {} objects", t.size()); // This is not called when building HashTable; we don't expect it to be called ever. int offset = prepareForAdd(t.size()); if (offset < 0) return; @@ -105,7 +105,7 @@ public void addRow(List t) throws HiveException { @Override public void addRow(Object[] value) throws HiveException { - LOG.debug("Add is called with " + value.length + " objects"); + LOG.debug("Add is called with {} objects", value.length); // This is not called when building HashTable; we don't expect it to be called ever. 
int offset = prepareForAdd(value.length); if (offset < 0) return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 2e0af02094..eb4c555157 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -201,9 +201,11 @@ private void finishRemainingTasks() throws SemanticException, IOException { } private void prepareReturnValues(List values) throws SemanticException { - LOG.debug("prepareReturnValues : " + dumpSchema); - for (String s : values) { - LOG.debug(" > " + s); + if (LOG.isDebugEnabled()) { + LOG.debug("prepareReturnValues : {}", dumpSchema); + for (String s : values) { + LOG.debug(" > {}", s); + } } Utils.writeOutput(Collections.singletonList(values), new Path(work.resultTempPath), conf); } @@ -642,7 +644,7 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) metadataPath.getFileSystem(conf).delete(metadataPath, true); } for (String dbName : Utils.matchesDb(hiveDb, work.dbNameOrPattern)) { - LOG.debug("Dumping db: " + dbName); + LOG.debug("Dumping db: {}", dbName); // TODO : Currently we don't support separate table list for each database. tableList = work.replScope.includeAllTables() ? null : new ArrayList<>(); Database db = hiveDb.getDatabase(dbName); @@ -665,7 +667,7 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) try (Writer writer = new Writer(dbRoot, conf)) { List extTableLocations = new LinkedList<>(); for (String tblName : Utils.matchesTbl(hiveDb, dbName, work.replScope)) { - LOG.debug("Dumping table: " + tblName + " to db root " + dbRoot.toUri()); + LOG.debug("Dumping table: {} to db root {}", tblName, dbRoot.toUri()); Table table = null; try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index a5935552c4..731eaf65ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -469,7 +469,7 @@ private int executeIncrementalLoad() { DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(updateReplIdTask)); work.setLastReplIDUpdated(true); - LOG.debug("Added task to set last repl id of db " + dbName + " to " + lastEventid); + LOG.debug("Added task to set last repl id of db {} to {}", dbName, lastEventid); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java index a311f7ae22..0e64337eb3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java @@ -189,7 +189,7 @@ private Path getDbLevelDataPath() { private BootstrapEvent postProcessing(BootstrapEvent bootstrapEvent) { previous = next; next = null; - LOG.debug("processing " + previous); + LOG.debug("processing {}", previous); return bootstrapEvent; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java index 7383d018ec..630acd9714 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java @@ -75,7 +75,7 @@ Path computeStagingDir(Path inputPath) { stagingPathName + "_" + generateExecutionId() + "-" + TaskRunner.getTaskRunnerID()); dir = fileSystem.makeQualified(path); - LOG.debug("Created staging dir = " + dir + " for path = " + inputPath); + LOG.debug("Created staging dir = {} for path = {}", dir, inputPath); if (!FileUtils.mkdir(fileSystem, dir, hiveConf)) { throw new IllegalStateException( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 7e844d3164..a91b7c8092 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -95,7 +95,7 @@ public IncrementalLoadTasksBuilder(String dbName, String loadPath, Long lastReplayedEvent = null; this.log = log; numIteration++; - this.log.debug("Iteration num " + numIteration); + this.log.debug("Iteration num {}", numIteration); while (iterator.hasNext() && tracker.canAddMoreTasks()) { FileStatus dir = iterator.next(); @@ -175,8 +175,8 @@ private boolean isEventNotReplayed(Map params, FileStatus dir, D if (params != null && (params.containsKey(ReplicationSpec.KEY.CURR_STATE_ID.toString()))) { String replLastId = params.get(ReplicationSpec.KEY.CURR_STATE_ID.toString()); if (Long.parseLong(replLastId) >= Long.parseLong(dir.getPath().getName())) { - log.debug("Event " + dumpType + " with replId " + Long.parseLong(dir.getPath().getName()) - + " is already replayed. LastReplId - " + Long.parseLong(replLastId)); + log.debug("Event {} with replId {} is already replayed. 
LastReplId - {}", + dumpType, Long.parseLong(dir.getPath().getName()), Long.parseLong(replLastId)); return false; } } @@ -194,7 +194,7 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return (database == null) || isEventNotReplayed(database.getParameters(), dir, dumpType); } catch (HiveException e) { // May be the db is getting created in this load - log.debug("Failed to get the database " + dbName); + log.debug("Failed to get the database {}", dbName); return true; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java index b9285accbd..637bfa0ba1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkDynamicPartitionPruner.java @@ -226,15 +226,11 @@ private void applyFilterToPartitions( "Could not find partition value for column: " + columnName); Object partValue = converter.convert(partValueString); - if (LOG.isDebugEnabled()) { - LOG.debug("Converted partition value: " + partValue + " original (" + partValueString + ")"); - } + LOG.debug("Converted partition value: {} original ({})", partValue, partValueString); row[0] = partValue; partValue = eval.evaluate(row); - if (LOG.isDebugEnabled()) { - LOG.debug("part key expr applied: " + partValue); - } + LOG.debug("part key expr applied: {}", partValue); if (!values.contains(partValue)) { LOG.info("Pruning path: " + p); @@ -266,9 +262,7 @@ private void applyFilterToPartitions( deserializer.initialize(jobConf, table.getProperties()); ObjectInspector inspector = deserializer.getObjectInspector(); - if (LOG.isDebugEnabled()) { - LOG.debug("Type of obj insp: " + inspector.getTypeName()); - } + LOG.debug("Type of obj insp: {}", inspector.getTypeName()); soi = (StructObjectInspector) inspector; List fields = soi.getAllStructFieldRefs(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMetricUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMetricUtils.java index 1f856ae3f1..270893ca52 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMetricUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMetricUtils.java @@ -52,8 +52,7 @@ public static void updateSparkBytesWrittenMetrics(Logger log, FileSystem fs, Pat try { bytesWritten.addAndGet(fs.getFileStatus(path).getLen()); } catch (IOException e) { - log.debug("Unable to collect stats for file: " + path + " output metrics may be inaccurate", - e); + log.debug("Unable to collect stats for file: {} output metrics may be inaccurate", path, e); } }); if (bytesWritten.get() > 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java index f401b4dcea..f300ea1ff4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java @@ -133,7 +133,7 @@ public int execute() { // Add Spark job handle id to the Hive History addToHistory(Keys.SPARK_JOB_HANDLE_ID, jobRef.getJobId()); - LOG.debug("Starting Spark job with job handle id " + sparkJobHandleId); + LOG.debug("Starting Spark job with job handle id {}", sparkJobHandleId); // Get the application id of the Spark app jobID = jobRef.getSparkJobStatus().getAppID(); @@ -177,7 +177,7 @@ public int execute() { // TODO: If the timeout is because of lack of resources in the 
cluster, we should // ideally also cancel the app request here. But w/o facilities from Spark or YARN, // it's difficult to do it on hive side alone. See HIVE-12650. - LOG.debug("Failed to submit Spark job with job handle id " + sparkJobHandleId); + LOG.debug("Failed to submit Spark job with job handle id {}", sparkJobHandleId); LOG.info("Failed to submit Spark job for application id " + (Strings.isNullOrEmpty(jobID) ? "UNKNOWN" : jobID)); killJob(); @@ -465,7 +465,7 @@ public void shutdown() { } private void killJob() { - LOG.debug("Killing Spark job with job handle id " + sparkJobHandleId); + LOG.debug("Killing Spark job with job handle id {}", sparkJobHandleId); boolean needToKillJob = false; if (jobRef != null && !jobKilled) { synchronized (this) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java index 24b801e9c6..39751f0faa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java @@ -238,9 +238,7 @@ public void onRootVertexInitialized(String inputName, InputDescriptor inputDescr } } - if (LOG.isDebugEnabled()) { - LOG.debug("Path file splits map for input name: " + inputName + " is " + pathFileSplitsMap); - } + LOG.debug("Path file splits map for input name: {} is {}", inputName, pathFileSplitsMap); Multimap bucketToInitialSplitMap = getBucketSplitMapForPath(inputName, pathFileSplitsMap); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index 6f5830dfc0..f45b5b4967 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -1530,7 +1530,7 @@ public Path createTezDir(Path scratchDir, Configuration conf) Path tezDir = getTezDir(scratchDir); if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) { FileSystem fs = tezDir.getFileSystem(conf); - LOG.debug("TezDir path set " + tezDir + " for user: " + userName); + LOG.debug("TezDir path set {} for user: {}", tezDir, userName); // since we are adding the user name to the scratch dir, we do not // need to give more permissions here // Since we are doing RPC creating a dir is not necessary diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java index ca1472219b..98ca484dfd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java @@ -288,15 +288,11 @@ private void applyFilterToPartitions(Converter converter, ExprNodeEvaluator eval } Object partValue = converter.convert(partValueString); - if (LOG.isDebugEnabled()) { - LOG.debug("Converted partition value: " + partValue + " original (" + partValueString + ")"); - } + LOG.debug("Converted partition value: {} original ({})", partValue, partValueString); row[0] = partValue; partValue = eval.evaluate(row); - if (LOG.isDebugEnabled()) { - LOG.debug("part key expr applied: " + partValue); - } + LOG.debug("part key expr applied: {}", partValue); if (!values.contains(partValue) && (!mustKeepOnePartition || work.getPathToPartitionInfo().size() > 1)) { LOG.info("Pruning path: " + p); @@ -357,7 +353,7 @@ public SourceInfo(TableDesc table, ExprNodeDesc partKey, String columnName, Stri 
deserializer.initialize(jobConf, table.getProperties()); ObjectInspector inspector = deserializer.getObjectInspector(); - LOG.debug("Type of obj insp: " + inspector.getTypeName()); + LOG.debug("Type of obj insp: {}", inspector.getTypeName()); soi = (StructObjectInspector) inspector; List fields = soi.getAllStructFieldRefs(); @@ -436,9 +432,7 @@ protected String processPayload(ByteBuffer payload, String sourceName) throws Se Object value = info.soi.getStructFieldData(row, info.field); value = ObjectInspectorUtils.copyToStandardObject(value, info.fieldInspector); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding: " + value + " to list of required partitions"); - } + LOG.debug("Adding: {} to list of required partitions", value); info.values.add(value); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicValueRegistryTez.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicValueRegistryTez.java index e46774213b..d6634081b4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicValueRegistryTez.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicValueRegistryTez.java @@ -135,7 +135,7 @@ public void init(RegistryConf conf) throws Exception { } // For now, expecting a single row (min/max, aggregated bloom filter), or no rows if (rowCount == 0) { - LOG.debug("No input rows from " + inputSourceName + ", filling dynamic values with nulls"); + LOG.debug("No input rows from {}, filling dynamic values with nulls", inputSourceName); for (int colIdx = 0; colIdx < colExprEvaluators.size(); ++colIdx) { ExprNodeEvaluator eval = colExprEvaluators.get(colIdx); setValue(runtimeValuesInfo.getDynamicValueIDs().get(colIdx), null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java index 67d6982ac0..107f53c95f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java @@ -292,9 +292,7 @@ public HiveSplitGenerator(InputInitializerContext initializerContext) throws IOE counterName = Utilities.getVertexCounterName(HiveInputCounters.GROUPED_INPUT_SPLITS.name(), vertexName); tezCounters.findCounter(groupName, counterName).setValue(flatSplits.length); - if (LOG.isDebugEnabled()) { - LOG.debug("Published tez counters: " + tezCounters); - } + LOG.debug("Published tez counters: {}", tezCounters); inputInitializerContext.addCounters(tezCounters); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java index a1d422b486..55710becab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java @@ -58,9 +58,7 @@ public HostAffinitySplitLocationProvider(List knownLocations) { @Override public String[] getLocations(InputSplit split) throws IOException { if (!(split instanceof FileSplit)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Split: " + split + " is not a FileSplit. Using default locations"); - } + LOG.debug("Split: {} is not a FileSplit. 
Using default locations", split); return split.getLocations(); } FileSplit fsplit = (FileSplit) split; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java index 903526387d..17368b6763 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java @@ -85,9 +85,7 @@ public void release(String key) { try { value = (T) registry.getIfPresent(key); if (value != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Found " + key + " in cache"); - } + LOG.debug("Found {} in cache", key); return value; } @@ -107,9 +105,7 @@ public void release(String key) { try { value = (T) registry.getIfPresent(key); if (value != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Found " + key + " in cache"); - } + LOG.debug("Found {} in cache", key); return value; } } finally { @@ -124,9 +120,7 @@ public void release(String key) { lock.lock(); try { - if (LOG.isDebugEnabled()) { - LOG.debug("Caching new object for key: " + key); - } + LOG.debug("Caching new object for key: {}", key); registry.put(key, value); locks.remove(key); @@ -151,9 +145,7 @@ public T call() throws Exception { @Override public void remove(String key) { - if (LOG.isDebugEnabled()) { - LOG.debug("Removing key: " + key); - } + LOG.debug("Removing key: {}", key); registry.invalidate(key); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java index 8c9d53f521..9db98b8656 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java @@ -173,7 +173,7 @@ void init(MRTaskReporter mrReporter, createOutputMap(); // Start all the Outputs. for (Entry outputEntry : outputs.entrySet()) { - LOG.debug("Starting Output: " + outputEntry.getKey()); + LOG.debug("Starting Output: {}", outputEntry.getKey()); outputEntry.getValue().start(); ((TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/SessionExpirationTracker.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/SessionExpirationTracker.java index df14f11226..d6aa7f07ef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/SessionExpirationTracker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/SessionExpirationTracker.java @@ -134,9 +134,7 @@ private void runExpirationThread() { while (true) { // Restart the sessions until one of them refuses to restart. nextToExpire = expirationQueue.take(); - if (LOG.isDebugEnabled()) { - LOG.debug("Seeing if we can expire [" + nextToExpire + "]"); - } + LOG.debug("Seeing if we can expire [{}]", nextToExpire); try { if (!nextToExpire.tryExpire(false)) break; } catch (Exception e) { @@ -148,7 +146,7 @@ private void runExpirationThread() { LOG.info("Tez session [" + nextToExpire + "] has expired"); } if (nextToExpire != null && LOG.isDebugEnabled()) { - LOG.debug("[" + nextToExpire + "] is not ready to expire; adding it back"); + LOG.debug("[{}] is not ready to expire; adding it back", nextToExpire); } // See addToExpirationQueue for why we re-check the queue. 
@@ -198,9 +196,7 @@ public void stop() { public void addToExpirationQueue(TezSessionPoolSession session) { long jitterModMs = (long)(sessionLifetimeJitterMs * rdm.nextFloat()); session.setExpirationNs(System.nanoTime() + (sessionLifetimeMs + jitterModMs) * 1000000L); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding a pool session [" + this + "] to expiration queue"); - } + LOG.debug("Adding a pool session [{}] to expiration queue", this); // Expiration queue is synchronized and notified upon when adding elements. Without jitter, we // wouldn't need this, and could simple look at the first element and sleep for the wait time. // However, when many things are added at once, it may happen that we will see the one that diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java index 89954cba67..bdda4b97f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java @@ -188,9 +188,7 @@ private boolean returnSessionInternal(SessionType session, boolean isAsync) { if (!session.stopUsing()) return true; // The session will be restarted and return to us. boolean canPutBack = putSessionBack(session, true); if (canPutBack) return true; - if (LOG.isDebugEnabled()) { - LOG.debug("Closing an unneeded returned session " + session); - } + LOG.debug("Closing an unneeded returned session {}", session); if (isAsync) return false; // The caller is responsible for destroying the session. try { @@ -275,10 +273,7 @@ void replaceSession(SessionType oldSession) throws Exception { } newSession.open(); if (!putSessionBack(newSession, false)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Closing an unneeded session " + newSession - + "; trying to replace " + oldSession); - } + LOG.debug("Closing an unneeded session {}; trying to replace {}", newSession, oldSession); try { newSession.close(false); } catch (Exception ex) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java index 83daf9df3a..61a8413d06 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java @@ -562,9 +562,7 @@ private void updateSessions() { /** Called by TezSessionPoolSession when closed. 
*/ @Override public void unregisterOpenSession(TezSessionPoolSession session) { - if (LOG.isDebugEnabled()) { - LOG.debug("Closed a pool session [" + this + "]"); - } + LOG.debug("Closed a pool session [{}]", this); synchronized (openSessions) { openSessions.remove(session); updateSessions(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java index 8becef1cd3..1ff0b8fac9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java @@ -815,9 +815,7 @@ private LocalResource createJarLocalResource(String localJarPath) destFileName = FilenameUtils.removeExtension(destFileName) + "-" + sha + FilenameUtils.EXTENSION_SEPARATOR + FilenameUtils.getExtension(destFileName); - if (LOG.isDebugEnabled()) { - LOG.debug("The destination file name for [" + localJarPath + "] is " + destFileName); - } + LOG.debug("The destination file name for [{}] is {}", localJarPath, destFileName); // TODO: if this method is ever called on more than one jar, getting the dir and the // list need to be refactored out to be done only once. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 854bc89e9c..7b528f8705 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -424,9 +424,8 @@ DAG build(JobConf conf, TezWork tezWork, Path scratchDir, Context ctx, .put("description", ctx.getCmd()); String dagInfo = json.toString(); - if (LOG.isDebugEnabled()) { - LOG.debug("DagInfo: " + dagInfo); - } + LOG.debug("DagInfo: {}", dagInfo); + dag.setDAGInfo(dagInfo); dag.setCredentials(conf.getCredentials()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java index 1aa133e517..879c9e5527 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java @@ -448,7 +448,7 @@ private void scheduleWork(WmThreadSyncWork context) { addKillQueryResult(toKill, true); killCtx.killSessionFuture.set(true); wmEvent.endEvent(toKill); - LOG.debug("Killed " + queryId); + LOG.debug("Killed {}", queryId); return; } catch (HiveException|IOException ex) { LOG.error("Failed to kill " + queryId + "; will try to restart AM instead" , ex); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index dc12d61589..3d96abcb4d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -1037,10 +1037,8 @@ public VectorExpression getVectorExpression(ExprNodeDesc exprDesc, VectorExpress } break; case ALL: - if (LOG.isDebugEnabled()) { - LOG.debug("We will try to use the VectorUDFAdaptor for " + exprDesc.toString() - + " because hive.vectorized.adaptor.usage.mode=all"); - } + LOG.debug("We will try to use the VectorUDFAdaptor for {} because hive.vectorized.adaptor.usage.mode=all", + exprDesc); ve = getCustomUDFExpression(expr, mode); break; default: @@ -1068,10 +1066,7 @@ public VectorExpression getVectorExpression(ExprNodeDesc exprDesc, VectorExpress throw new HiveException( "Could not vectorize
expression (mode = " + mode.name() + "): " + exprDesc.toString()); } - if (LOG.isDebugEnabled()) { - LOG.debug("Input Expression = " + exprDesc.toString() - + ", Vectorized Expression = " + ve.toString()); - } + LOG.debug("Input Expression = {}, Vectorized Expression = {}", exprDesc, ve); return ve; } @@ -3046,9 +3041,7 @@ private HiveDecimal castConstantToDecimal(Object scalar, TypeInfo type) throws H throw new HiveException("Unsupported primitive category " + primitiveCategory + " for cast to HiveDecimal"); } if (rawDecimal == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Casting constant scalar " + scalar + " to HiveDecimal resulted in null"); - } + LOG.debug("Casting constant scalar {} to HiveDecimal resulted in null", scalar); return null; } return rawDecimal; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java index 66db5e65ca..f60cd08117 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java @@ -600,18 +600,14 @@ protected void reloadHashTable(byte pos, int partitionId) needHashTableSetup = true; LOG.info("Created " + vectorMapJoinHashTable.getClass().getSimpleName() + " from " + this.getClass().getSimpleName()); - if (LOG.isDebugEnabled()) { - LOG.debug(CLASS_NAME + " reloadHashTable!"); - } + LOG.debug(CLASS_NAME + " reloadHashTable!"); } @Override protected void reProcessBigTable(int partitionId) throws HiveException { - if (LOG.isDebugEnabled()) { - LOG.debug(CLASS_NAME + " reProcessBigTable enter..."); - } + LOG.debug(CLASS_NAME + " reProcessBigTable enter..."); if (spillReplayBatch == null) { // The process method was not called -- no big table rows. 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/rowbytescontainer/VectorRowBytesContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/rowbytescontainer/VectorRowBytesContainer.java index 1c61c971f1..5c79683714 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/rowbytescontainer/VectorRowBytesContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/rowbytescontainer/VectorRowBytesContainer.java @@ -91,7 +91,7 @@ private void setupOutputFileStreams() throws IOException { parentDir = FileUtils.createLocalDirsTempFile(spillLocalDirs, "bytes-container", "", true); parentDir.deleteOnExit(); tmpFile = File.createTempFile("BytesContainer", ".tmp", parentDir); - LOG.debug("BytesContainer created temp file " + tmpFile.getAbsolutePath()); + LOG.debug("BytesContainer created temp file {}", tmpFile.getAbsolutePath()); tmpFile.deleteOnExit(); fileOutputStream = new FileOutputStream(tmpFile); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java index 0632f6ee2c..68e1cc532b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java @@ -373,7 +373,7 @@ TimelineEntity createPreHookEvent(String queryId, String query, JSONObject expla LOG.info("Received pre-hook notification for :" + queryId); if (LOG.isDebugEnabled()) { - LOG.debug("Otherinfo: " + queryObj.toString()); + LOG.debug("Otherinfo: " + queryObj); LOG.debug("Operation id: <" + opId + ">"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 98d5a79161..6336879249 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -461,9 +461,7 @@ public static int parseBucketId(Path bucketFile) { if (matcher.matches()) { String bucketId = matcher.group(1); filename = filename.substring(0,matcher.end(1)); - if (Utilities.FILE_OP_LOGGER.isDebugEnabled()) { - Utilities.FILE_OP_LOGGER.debug("Parsing bucket ID = " + bucketId + " from file name '" + filename + "'"); - } + Utilities.FILE_OP_LOGGER.debug("Parsing bucket ID = {} from file name '{}'", bucketId, filename); return Integer.parseInt(bucketId); } } @@ -477,9 +475,7 @@ public static String parseAttemptId(Path bucketFile) { if (matcher.matches()) { attemptId = matcher.group(2) != null ? 
matcher.group(2).substring(1) : null; } - if (Utilities.FILE_OP_LOGGER.isDebugEnabled()) { - Utilities.FILE_OP_LOGGER.debug("Parsing attempt ID = " + attemptId + " from file name '" + bucketFile + "'"); - } + Utilities.FILE_OP_LOGGER.debug("Parsing attempt ID = {} from file name '{}'", attemptId, bucketFile); return attemptId; } @@ -1415,8 +1411,7 @@ else if (prev != null && next.maxWriteId == prev.maxWriteId } } } - LOG.debug("in directory " + candidateDirectory.toUri().toString() + " base = " + base + " deltas = " + - deltas.size()); + LOG.debug("in directory {} base = {} deltas = " + deltas.size(), candidateDirectory.toUri(), base); /** * If this sort order is changed and there are tables that have been converted to transactional * and have had any update/delete/merge operations performed but not yet MAJOR compacted, it @@ -1865,7 +1860,7 @@ private static boolean isDirUsable(Path child, long visibilityTxnId, if(isAborted) { aborted.add(child);//so we can clean it up } - LOG.debug("getChildState() ignoring(" + aborted + ") " + child); + LOG.debug("getChildState() ignoring({}) {}", aborted, child); return false; } return true; @@ -2287,9 +2282,11 @@ public static ValidWriteIdList getTableValidWriteIdList(Configuration conf, Stri */ public static void setValidWriteIdList(Configuration conf, ValidWriteIdList validWriteIds) { conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, validWriteIds.toString()); - LOG.debug("Setting ValidWriteIdList: " + validWriteIds.toString() - + " isAcidTable: " + HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, false) - + " acidProperty: " + getAcidOperationalProperties(conf)); + if (LOG.isDebugEnabled()) { + LOG.debug("Setting ValidWriteIdList: " + validWriteIds.toString() + " isAcidTable: " + + HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, false) + " acidProperty: " + + getAcidOperationalProperties(conf)); + } } /** @@ -2593,8 +2590,7 @@ public static boolean isRawFormatFile(Path dataFile, FileSystem fs) throws IOExc } catch (FileFormatException ex) { //We may be parsing a delta for Insert-only table which may not even be an ORC file so //cannot have ROW_IDs in it. - LOG.debug("isRawFormat() called on " + dataFile + " which is not an ORC file: " + - ex.getMessage()); + LOG.debug("isRawFormat() called on {} which is not an ORC file", dataFile, ex); return true; } } @@ -2672,7 +2668,7 @@ public static int getAcidVersionFromMetaFile(Path deltaOrBaseDir, FileSystem fs) throws IOException { Path formatFile = getVersionFilePath(deltaOrBaseDir); if(!fs.exists(formatFile)) { - LOG.debug(formatFile + " not found, returning default: " + ORC_ACID_VERSION_DEFAULT); + LOG.debug("{} not found, returning default: {}", formatFile, ORC_ACID_VERSION_DEFAULT); return ORC_ACID_VERSION_DEFAULT; } try (FSDataInputStream inputStream = fs.open(formatFile)) { @@ -2861,16 +2857,14 @@ public static Long extractWriteId(Path file) { } String[] parts = fileName.split("_", 4); // e.g. 
delta_0000001_0000001_0000 or base_0000022 if (parts.length < 2) { - LOG.debug("Cannot extract write ID for a MM table: " + file - + " (" + Arrays.toString(parts) + ")"); + LOG.debug("Cannot extract write ID for a MM table: {} ({})", file, Arrays.toString(parts)); return null; } long writeId = -1; try { writeId = Long.parseLong(parts[1]); } catch (NumberFormatException ex) { - LOG.debug("Cannot extract write ID for a MM table: " + file - + "; parsing " + parts[1] + " got " + ex.getMessage()); + LOG.debug("Cannot extract write ID for a MM table: {}; parsing {}", file, parts[1], ex); return null; } return writeId; @@ -2982,7 +2976,7 @@ private static boolean isLockableTable(Table t) { // overwrite) than we need a shared. If it's update or delete then we // need a SEMI-SHARED. for (WriteEntity output : outputs) { - LOG.debug("output is null " + (output == null)); + LOG.debug("output is null {}", (output == null)); if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils .needsLock(output)) { // We don't lock files or directories. We also skip locking temp tables. @@ -3089,7 +3083,7 @@ Seems much cleaner if each stmt is identified as a particular HiveOperation (whi compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite()); LockComponent comp = compBuilder.build(); - LOG.debug("Adding lock component to lock request " + comp.toString()); + LOG.debug("Adding lock component to lock request {}", comp); lockComponents.add(comp); } return lockComponents; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java index 5d2093101e..24e0048ee2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java @@ -104,7 +104,7 @@ public RecordReader getRecordReader(InputSplit split, JobConf job, if (!errors.isEmpty()) { throw new InvalidInputException(errors); } - LOG.debug("Matches for " + dir + ": " + result); + LOG.debug("Matches for {}: {}", dir, result); LOG.info("Total input paths to process : " + result.size() + " from dir " + dir); return result.toArray(new FileStatus[result.size()]); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java index 1f72477666..471debf93e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java @@ -415,8 +415,7 @@ public int hashCode() { combine.createPool(job, f); poolMap.put(combinePathInputFormat, f); } else { - LOG.debug("CombineHiveInputSplit: pool is already created for " + path + - "; using filter path " + filterPath); + LOG.debug("CombineHiveInputSplit: pool is already created for {}; using filter path {}", path, filterPath); f.addPath(filterPath); } } else { @@ -464,7 +463,7 @@ public int hashCode() { CombineHiveInputSplit csplit = new CombineHiveInputSplit(job, is, pathToPartitionInfo); result.add(csplit); } - LOG.debug("Number of splits " + result.size()); + LOG.debug("Number of splits {}", result.size()); return result.toArray(new InputSplit[result.size()]); } @@ -539,11 +538,8 @@ public int hashCode() { // Store the previous value for the path specification String oldPaths = job.get(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR); - if (LOG.isDebugEnabled()) { - LOG.debug("The received input 
paths are: [" + oldPaths + - "] against the property " - + org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR); - } + LOG.debug("The received input paths are: [{}] against the property {}", oldPaths, + org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR); // Process the normal splits if (nonCombinablePaths.size() > 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java index f10460dfbe..80d5cb7cd0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveRecordReader.java @@ -111,9 +111,8 @@ private PartitionDesc extractSinglePartSpec(CombineHiveInputSplit hsplit) throws PartitionDesc part = null; Map, Map> cache = new HashMap<>(); for (Path path : hsplit.getPaths()) { - PartitionDesc otherPart = HiveFileFormatUtils.getFromPathRecursively( - pathToPartInfo, path, cache); - LOG.debug("Found spec for " + path + " " + otherPart + " from " + pathToPartInfo); + PartitionDesc otherPart = HiveFileFormatUtils.getFromPathRecursively(pathToPartInfo, path, cache); + LOG.debug("Found spec for {} {} from {}", path, otherPart, pathToPartInfo); if (part == null) { part = otherPart; } else if (otherPart != part) { // Assume we should have the exact same object. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java index 38b226f795..761f5a987e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java @@ -173,7 +173,7 @@ private void initIOContext(long startPos, boolean isBlockPointer, ioCxtRef.setCurrentBlockStart(startPos); ioCxtRef.setBlockPointer(isBlockPointer); ioCxtRef.setInputPath(inputPath); - LOG.debug("Processing file " + inputPath); // Logged at INFO in multiple other places. + LOG.debug("Processing file {}", inputPath); initDone = true; } @@ -404,8 +404,7 @@ private void setGenericUDFClassName(String genericUDFClassName) throws IOExcepti // Do nothing } else { // This is an unsupported operator - LOG.debug(genericUDFClassName + " is not the name of a supported class. " + - "Continuing linearly."); + LOG.debug("{} is not the name of a supported class. 
Continuing linearly.", genericUDFClassName); if (this.getIOContext().isBinarySearching()) { beginLinearSearch(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 218d6651af..cf3a2645f1 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -271,9 +271,7 @@ public void configure(JobConf job) { } return inputFormat; } - if (LOG.isDebugEnabled()) { - LOG.debug("Processing " + ifName); - } + LOG.debug("Processing {}", ifName); @SuppressWarnings("unchecked") LlapIo llapIo = LlapProxy.getIo(); @@ -315,9 +313,7 @@ public void configure(JobConf job) { private static boolean checkInputFormatForLlapEncode(Configuration conf, String ifName) { String formatList = HiveConf.getVar(conf, ConfVars.LLAP_IO_ENCODE_FORMATS); - if (LOG.isDebugEnabled()) { - LOG.debug("Checking " + ifName + " against " + formatList); - } + LOG.debug("Checking {} against {}", ifName, formatList); String[] formats = StringUtils.getStrings(formatList); if (formats != null) { for (String format : formats) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java index 3139b10d03..21bdd2d923 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java @@ -298,9 +298,7 @@ public static void translateSargToTableColIndexes( String newColName = RecordReaderImpl.encodeTranslatedSargColumn(rootColumn, colId); SearchArgumentFactory.setPredicateLeafColumn(pl, newColName); } - if (LOG.isDebugEnabled()) { - LOG.debug("SARG translated into " + sarg); - } + LOG.debug("SARG translated into {}", sarg); } private static OrcTail createOrcTailFromMs( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java index 202f78b81c..05d369d91f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java @@ -156,9 +156,7 @@ public SerDeStats getStats() { schema.addField(columnNames.get(i), OrcInputFormat.convertTypeInfo(columnTypes.get(i))); } - if (LOG.isDebugEnabled()) { - LOG.debug("ORC schema = " + schema); - } + LOG.debug("ORC schema = {}", schema); result.setSchema(schema); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java index f543418179..6be69d93cf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java @@ -290,7 +290,7 @@ public void next(OrcStruct next) throws IOException { OrcRecordUpdater.getOperation(nextRecord()) == OrcRecordUpdater.DELETE_OPERATION); // if this record is larger than maxKey, we need to stop if (getMaxKey() != null && getKey().compareRow(getMaxKey()) > 0) { - LOG.debug("key " + getKey() + " > maxkey " + getMaxKey()); + LOG.debug("key {} > maxkey {}", getKey(), getMaxKey()); nextRecord = null; getRecordReader().close(); } @@ -406,9 +406,7 @@ final boolean nextFromCurrentFile(OrcStruct next) throws IOException { } key.setValues(writeId, bucketProperty, nextRowId, writeId, false); if (getMaxKey() != null && key.compareRow(getMaxKey()) > 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("key " + key 
+ " > maxkey " + getMaxKey()); - } + LOG.debug("key {} > maxkey {}", key, getMaxKey()); return false;//reached End Of Split } return true; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index 4b339a6c58..ddbcef6ead 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -245,9 +245,7 @@ private static TypeDescription getTypeDescriptionFromTableProperties(Properties } } - if (LOG.isDebugEnabled()) { - LOG.debug("ORC schema = " + schema); - } + LOG.debug("ORC schema = {}", schema); return schema; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java index 1f5a9d7803..4a262bfd83 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java @@ -1932,9 +1932,7 @@ public void readIndexStreams(OrcIndex index, StripeInformation stripe, DiskRangeList indexRanges = planIndexReading(fileSchema, streams, true, physicalFileIncludes, sargColumns, version, index.getBloomFilterKinds()); if (indexRanges == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Nothing to read for stripe [" + stripe + "]"); - } + LOG.debug("Nothing to read for stripe [{}]", stripe); return; } ReadContext[] colCtxs = new ReadContext[physicalFileIncludes.length]; @@ -1943,7 +1941,7 @@ public void readIndexStreams(OrcIndex index, StripeInformation stripe, if (!physicalFileIncludes[i] && (sargColumns == null || !sargColumns[i])) continue; colCtxs[i] = new ReadContext(i, ++colRgIx); if (isTracingEnabled) { - LOG.trace("Creating context: " + colCtxs[i].toString()); + LOG.trace("Creating context: {}", colCtxs[i]); } trace.logColumnRead(i, colRgIx, ColumnEncoding.Kind.DIRECT); // Bogus encoding. 
} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java index 6d525ba269..3d807307c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java @@ -143,7 +143,7 @@ private void pushProjectionsAndFilters(final JobConf jobConf, } } } catch(UDFArgumentException ex) { - LOG.debug("Turn off filtering due to " + ex); + LOG.debug("Turn off filtering", ex); tableFilterExpr = null; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java index 8d3cb7c2de..4471542758 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/BaseVectorizedColumnReader.java @@ -211,13 +211,21 @@ private void readPageV1(DataPageV1 page) { this.definitionLevelColumn = new ValuesReaderIntIterator(dlReader); try { BytesInput bytes = page.getBytes(); - LOG.debug("page size " + bytes.size() + " bytes and " + pageValueCount + " records"); + if (LOG.isDebugEnabled()) { + LOG.debug("page size " + bytes.size() + " bytes and " + pageValueCount + " records"); + } ByteBufferInputStream in = bytes.toInputStream(); - LOG.debug("reading repetition levels at " + in.position()); + if (LOG.isDebugEnabled()) { + LOG.debug("reading repetition levels at " + in.position()); + } rlReader.initFromPage(pageValueCount, in); - LOG.debug("reading definition levels at " + in.position()); + if (LOG.isDebugEnabled()) { + LOG.debug("reading definition levels at " + in.position()); + } dlReader.initFromPage(pageValueCount, in); - LOG.debug("reading data at " + in.position()); + if (LOG.isDebugEnabled()) { + LOG.debug("reading data at " + in.position()); + } initDataReader(page.getValueEncoding(), in, page.getValueCount()); } catch (IOException e) { throw new ParquetDecodingException("could not read page " + page + " in col " + descriptor, e); @@ -230,7 +238,9 @@ private void readPageV2(DataPageV2 page) { page.getRepetitionLevels()); this.definitionLevelColumn = newRLEIterator(descriptor.getMaxDefinitionLevel(), page.getDefinitionLevels()); try { - LOG.debug("page data size " + page.getData().size() + " bytes and " + pageValueCount + " records"); + if (LOG.isDebugEnabled()) { + LOG.debug("page data size " + page.getData().size() + " bytes and " + pageValueCount + " records"); + } initDataReader(page.getDataEncoding(), page.getData().toInputStream(), page.getValueCount()); } catch (IOException e) { throw new ParquetDecodingException("could not read page " + page + " in col " + descriptor, e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lock/CompileLock.java b/ql/src/java/org/apache/hadoop/hive/ql/lock/CompileLock.java index 90fbfe4a44..1c663e9e68 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lock/CompileLock.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lock/CompileLock.java @@ -32,9 +32,6 @@ private static final Logger LOG = LoggerFactory.getLogger(CompileLock.class); - private static final String LOCK_ACQUIRED_MSG = "Acquired the compile lock."; - private static final String WAIT_LOCK_ACQUIRE_MSG = "Waiting to acquire compile lock: "; - private final Lock underlying; private final long defaultTimeout; @@ -65,21 +62,17 @@ private boolean tryAcquire(long timeout, TimeUnit unit) { // First shot 
without waiting. try { if (underlying.tryLock(0, unit)) { - LOG.debug(LOCK_ACQUIRED_MSG); + LOG.debug("Acquired the compile lock"); return aquired(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - if (LOG.isDebugEnabled()) { - LOG.debug("Interrupted Exception ignored", e); - } + LOG.debug("Interrupted Exception ignored", e); return failedToAquire(); } // If the first shot fails, then we log the waiting messages. - if (LOG.isDebugEnabled()) { - LOG.debug(WAIT_LOCK_ACQUIRE_MSG + command); - } + LOG.debug("Waiting to acquire compile lock: {}", command); if (timeout > 0) { try { @@ -98,7 +91,7 @@ private boolean tryAcquire(long timeout, TimeUnit unit) { underlying.lock(); } - LOG.debug(LOCK_ACQUIRED_MSG); + LOG.debug("Acquired the compile lock"); return aquired(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java index 4b6bc3e1e3..d16bd139c3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java @@ -214,7 +214,7 @@ public void unlock(HiveLock hiveLock) throws LockException { long lockId = ((DbHiveLock)hiveLock).lockId; boolean removed = false; try { - LOG.debug("Unlocking " + hiveLock); + LOG.debug("Unlocking {}", hiveLock); txnManager.getMS().unlock(lockId); //important to remove after unlock() in case it fails removed = locks.remove(hiveLock); @@ -226,7 +226,7 @@ public void unlock(HiveLock hiveLock) throws LockException { LOG.warn("Error Reporting hive client metastore unlock operation to Metrics system", e); } } - LOG.debug("Removed a lock " + removed); + LOG.debug("Removed a lock {}", removed); } catch (NoSuchLockException e) { //if metastore has no record of this lock, it most likely timed out; either way //there is no point tracking it here any longer @@ -241,9 +241,7 @@ public void unlock(HiveLock hiveLock) throws LockException { e); } finally { - if(removed) { - LOG.debug("Removed a lock " + hiveLock); - } + LOG.debug("Removed a lock {} [{}]", hiveLock, removed); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index b4dac4346e..13e91f18a6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -416,7 +416,7 @@ LockState acquireLocks(QueryPlan plan, Context ctx, String username, boolean isB // Make sure we need locks. It's possible there's nothing to lock in // this operation. if(plan.getInputs().isEmpty() && plan.getOutputs().isEmpty()) { - LOG.debug("No locks needed for queryId=" + queryId); + LOG.debug("No locks needed for queryId={}", queryId); return null; } List lockComponents = AcidUtils.makeLockComponents(plan.getOutputs(), plan.getInputs(), conf); @@ -425,7 +425,7 @@ LockState acquireLocks(QueryPlan plan, Context ctx, String username, boolean isB //It's possible there's nothing to lock even if we have w/r entities. 
if(lockComponents.isEmpty()) { - LOG.debug("No locks needed for queryId=" + queryId); + LOG.debug("No locks needed for queryId={}", queryId); return null; } rqstBuilder.addLockComponents(lockComponents); @@ -453,7 +453,7 @@ LockState acquireLocks(QueryPlan plan, Context ctx, String username, boolean isB compBuilder.setDbName(GLOBAL_LOCKS); compBuilder.setTableName(lockName); globalLocks.add(compBuilder.build()); - LOG.debug("Adding global lock: " + lockName); + LOG.debug("Adding global lock: {}", lockName); } return globalLocks; } @@ -529,7 +529,7 @@ public void commitTxn() throws LockException { try { // do all new clear in clearLocksAndHB method to make sure that same code is there for replCommitTxn flow. clearLocksAndHB(); - LOG.debug("Committing txn " + JavaUtils.txnIdToString(txnId)); + LOG.debug("Committing txn {}", JavaUtils.txnIdToString(txnId)); getMS().commitTxn(txnId); } catch (NoSuchTxnException e) { LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId)); @@ -571,7 +571,7 @@ public void rollbackTxn() throws LockException { try { lockMgr.clearLocalLockRecords(); - LOG.debug("Rolling back " + JavaUtils.txnIdToString(txnId)); + LOG.debug("Rolling back {}", JavaUtils.txnIdToString(txnId)); // Re-checking as txn could have been closed, in the meantime, by a competing thread. if (isTxnOpen()) { @@ -679,8 +679,10 @@ private Heartbeater startHeartbeat(long initialDelay) throws LockException { Heartbeater heartbeater = new Heartbeater(this, conf, queryId, currentUser); heartbeatTask = startHeartbeat(initialDelay, heartbeatInterval, heartbeater); - LOG.debug("Started heartbeat with delay/interval = " + initialDelay + "/" + heartbeatInterval + - " " + TimeUnit.MILLISECONDS + " for query: " + queryId); + if (LOG.isDebugEnabled()) { + LOG.debug("Started heartbeat with delay/interval = " + initialDelay + "/" + heartbeatInterval + " " + + TimeUnit.MILLISECONDS + " for query: " + queryId); + } return heartbeater; } finally { @@ -1049,7 +1051,7 @@ public void run() { if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER)) { throw new LockException(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER.name() + "=true"); } - LOG.debug("Heartbeating...for currentUser: " + currentUser); + LOG.debug("Heartbeating...for currentUser: {}", currentUser); currentUser.doAs((PrivilegedExceptionAction) () -> { txnMgr.heartbeat(); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java index 7820013ab0..3c2834cec3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java @@ -170,7 +170,7 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username, DriverSta if (!input.needsLock()) { continue; } - LOG.debug("Adding " + input.getName() + " to list of lock inputs"); + LOG.debug("Adding {} to list of lock inputs", input.getName()); if (input.getType() == ReadEntity.Type.DATABASE) { lockObjects.addAll(getLockObjects(plan, input.getDatabase(), null, null, HiveLockMode.SHARED)); @@ -189,7 +189,7 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username, DriverSta if (lockMode == null) { continue; } - LOG.debug("Adding " + output.getName() + " to list of lock outputs"); + LOG.debug("Adding {} to list of lock outputs", output.getName()); List lockObj = null; if (output.getType() == WriteEntity.Type.DATABASE) { 
lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null, null, lockMode)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 1f9fb3b897..57fcf9e428 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2591,10 +2591,10 @@ public static void listFilesInsideAcidDirectory(Path acidDir, FileSystem srcFs, FileStatus[] acidFiles; acidFiles = srcFs.listStatus(acidDir); if (acidFiles == null) { - LOG.debug("No files added by this query in: " + acidDir); + LOG.debug("No files added by this query in: {}", acidDir); return; } - LOG.debug("Listing files under " + acidDir); + LOG.debug("Listing files under {}", acidDir); for (FileStatus acidFile : acidFiles) { // need to list out only files, ignore folders. if (!acidFile.isDirectory()) { @@ -2625,7 +2625,7 @@ private void setStatsPropAndAlterPartition(boolean resetStatistics, Table tbl, if (!resetStatistics) { ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE); } - LOG.debug("Altering existing partition " + newTPart.getSpec()); + LOG.debug("Altering existing partition {}", newTPart.getSpec()); getSynchronizedMSC().alter_partition(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), newTPart.getTPartition(), new EnvironmentContext(), tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList()); @@ -3145,13 +3145,13 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType perfLogger.PerfLogEnd("MoveTask", PerfLogger.FILE_MOVES); } if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { - LOG.debug("setting table statistics false for " + tbl.getDbName() + "." + tbl.getTableName()); + LOG.debug("Setting table statistics false for `{}`.`{}`", tbl.getDbName(), tbl.getTableName()); StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); } //column stats will be inaccurate if (resetStatistics) { - LOG.debug("Clearing table statistics for " + tbl.getDbName() + "." + tbl.getTableName()); + LOG.debug("Clearing table statistics for `{}`.`{}`", tbl.getDbName(), tbl.getTableName()); StatsSetupConst.clearColumnStatsState(tbl.getParameters()); } @@ -3305,8 +3305,7 @@ public Partition getPartition(Table tbl, Map partSpec, try { if (forceCreate) { if (tpart == null) { - LOG.debug("creating partition for table " + tbl.getTableName() - + " with partition spec : " + partSpec); + LOG.debug("creating partition for table {} with partition spec : {}", tbl.getTableName(), partSpec); try { tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals); } catch (AlreadyExistsException aee) { @@ -4130,7 +4129,7 @@ private static boolean isSubDir(Path srcf, Path destf, FileSystem srcFs, FileSys boolean isInTest = HiveConf.getBoolVar(srcFs.getConf(), ConfVars.HIVE_IN_TEST); // In the automation, the data warehouse is the local file system based. - LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2); + LOG.debug("The source path is {} and the destination path is {}", fullF1, fullF2); if (isInTest) { return fullF1.startsWith(fullF2); } @@ -4147,12 +4146,11 @@ private static boolean isSubDir(Path srcf, Path destf, FileSystem srcFs, FileSys // If both schema information are provided, they should be the same. 
if (schemaSrcf != null && schemaDestf != null && !schemaSrcf.equals(schemaDestf)) { - LOG.debug("The source path's schema is " + schemaSrcf + - " and the destination path's schema is " + schemaDestf + "."); + LOG.debug("The source path's schema is {} and the destination path's schema is {}", schemaSrcf, schemaDestf); return false; } - LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2); + LOG.debug("The source path is {} and the destination path is {}", fullF1, fullF2); return fullF1.startsWith(fullF2); } @@ -4366,7 +4364,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, // to delete the file first if (replace && !srcIsSubDirOfDest) { destFs.delete(destf, true); - LOG.debug("The path " + destf.toString() + " is deleted"); + LOG.debug("The path {} is deleted", destf); } } catch (FileNotFoundException ignore) { } @@ -4379,7 +4377,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, } else { if (needToCopy(srcf, destf, srcFs, destFs, configuredOwner, isManaged)) { //copy if across file system or encryption zones. - LOG.debug("Copying source " + srcf + " to " + destf + " because HDFS encryption zones are different."); + LOG.debug("Copying source {} to {} because HDFS encryption zones are different.", srcf, destf); return FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf, true, // delete source replace, // overwrite destination @@ -4393,9 +4391,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25), new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()) : null; if (destIsSubDirOfSrc && !destFs.exists(destf)) { - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace("Creating " + destf); - } + Utilities.FILE_OP_LOGGER.trace("Creating {}", destf); destFs.mkdirs(destf); } /* Move files one by one because source is a subdirectory of destination */ @@ -4875,8 +4871,7 @@ public Boolean call() throws Exception { private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, boolean purge, PathFilter pathFilter, boolean isNeedRecycle) throws HiveException { - Utilities.FILE_OP_LOGGER.debug("Deleting old paths for replace in " + destPath - + " and old path " + oldPath); + Utilities.FILE_OP_LOGGER.debug("Deleting old paths for replace in {} and old path {}", destPath, oldPath); boolean isOldPathUnderDestf = false; try { FileSystem oldFs = oldPath.getFileSystem(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 136709c6dc..405eeefffc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -572,7 +572,7 @@ public boolean isStoredAsSubDirectories() { } public List getSkewedColNames() { - LOG.debug("sd is " + tPartition.getSd().getClass().getName()); + LOG.debug("sd is {}", tPartition.getSd().getClass().getName()); return tPartition.getSd().getSkewedInfo().getSkewedColNames(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 3dcf876af3..f11dcfb89e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -827,7 +827,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo private static Map> getTempTables(String msg) { SessionState ss = SessionState.get(); if (ss == null) { - LOG.debug("No current SessionState, skipping temp tables for " + msg); + LOG.debug("No current SessionState, skipping temp tables for {}", msg); return Collections.emptyMap(); } return ss.getTempTables(); @@ -837,7 +837,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo String tableName) { SessionState ss = SessionState.get(); if (ss == null) { - LOG.debug("No current SessionState, skipping temp tables for " + + LOG.debug("No current SessionState, skipping temp tables for {}", Warehouse.getQualifiedName(dbName, tableName)); return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/events/NotificationEventPoll.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/events/NotificationEventPoll.java index 010f00c7d3..bf867aba12 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/events/NotificationEventPoll.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/events/NotificationEventPoll.java @@ -138,7 +138,7 @@ public void run() { while (evIter.hasNext()) { NotificationEvent event = evIter.next(); - LOG.debug("Event: " + event); + LOG.debug("Event: {}", event); for (EventConsumer eventConsumer : eventConsumers) { try { eventConsumer.accept(event); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java index d69df776ae..f792bfea09 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java @@ -263,7 +263,7 @@ protected boolean checkConvertBucketMapJoin( Table tbl = tso.getConf().getTableMetadata(); if (AcidUtils.isInsertOnlyTable(tbl.getParameters())) { - Utilities.FILE_OP_LOGGER.debug("No bucketed join on MM table " + tbl.getTableName()); + Utilities.FILE_OP_LOGGER.debug("No bucketed join on MM table {}", tbl.getTableName()); return false; } if (tbl.isPartitioned()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java index 5dc6bd08e4..2e6827e56a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java @@ -576,7 +576,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, List colLists = new ArrayList<>(); List keys = conf.getKeyCols(); - LOG.debug("Reduce Sink Operator " + op.getIdentifier() + " key:" + keys); + LOG.debug("Reduce Sink Operator {} key:{}", op.getIdentifier(), keys); for (ExprNodeDesc key : keys) { colLists = mergeFieldNodesWithDesc(colLists, key); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java index b111e4f428..e794a59efc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java @@ -97,7 +97,7 @@ public ConstantPropagateProcCtx(ConstantPropagateOption option) { return constants; } RowSchema rs = op.getSchema(); - 
LOG.debug("Getting constants of op:" + op + " with rs:" + rs); + LOG.debug("Getting constants of op: {} with rs: {}", op, rs); if (op.getParentOperators() == null) { return constants; @@ -117,7 +117,7 @@ public ConstantPropagateProcCtx(ConstantPropagateOption option) { for (Operator parent : op.getParentOperators()) { Map constMap = opToConstantExprs.get(parent); if (constMap == null) { - LOG.debug("Constant of Op " + parent.getOperatorId() + " is not found"); + LOG.debug("Constant of Op {} is not found", parent.getOperatorId()); areAllParentsContainConstant = false; } else { noParentsContainConstant = false; @@ -127,7 +127,7 @@ public ConstantPropagateProcCtx(ConstantPropagateOption option) { entry.getValue()); } parentsToConstant.add(map); - LOG.debug("Constant of Op " + parent.getOperatorId() + " " + constMap); + LOG.debug("Constant of Op {} {}", parent.getOperatorId(), constMap); } } if (noParentsContainConstant) { @@ -234,7 +234,7 @@ public ConstantPropagateProcCtx(ConstantPropagateOption option) { } } } - LOG.debug("Offering constants " + constants.keySet() + " to operator " + op.toString()); + LOG.debug("Offering constants {} to operator {}", constants.keySet(), op); return constants; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index d8d8cae936..d5be7436d6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -1518,7 +1518,7 @@ private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, Optim ReduceSinkOperator bigTableParentRS = (ReduceSinkOperator) (joinOp.getParentOperators().get(bigTablePos)); numReducers = bigTableParentRS.getConf().getNumReducers(); - LOG.debug("Real big table reducers = " + numReducers); + LOG.debug("Real big table reducers = {}", numReducers); MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversion, false); if (mapJoinOp != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 1ea3bd357f..3f14c8c6c1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -678,9 +678,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set stack, NodeProcessorCtx procCtx, if(sel.isIdentitySelect()) { parent.removeChildAndAdoptItsChildren(sel); - LOG.debug("Identity project remover optimization removed : " + sel); + LOG.debug("Identity project remover optimization removed : {}", sel); } return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java index 4f1c9b2640..1ce471727a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java @@ -381,7 +381,7 @@ private static boolean checkFullOuterMapJoinCompatible(HiveConf hiveConf, List exprList = mapEntry.getValue(); for (ExprNodeDesc expr : exprList) { if (!(expr instanceof ExprNodeColumnDesc)) { - LOG.debug("FULL OUTER MapJoin: only column expressions are supported " + expr.toString()); + LOG.debug("FULL OUTER MapJoin: only column expressions are supported {}", expr); return false; } } @@ -399,20 +399,14 @@ private static boolean 
checkFullOuterMapJoinCompatible(HiveConf hiveConf, // Verify we handle the key column types for an optimized table. This is the effectively // the same check used in Tez HashTableLoader. if (!MapJoinKey.isSupportedField(typeInfo)) { - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: " + - " key type " + typeInfo.toString() + " not supported"); - } + LOG.debug("FULL OUTER MapJoin not enabled: key type {} not supported", typeInfo); return false; } } } if (onExpressionHasNullSafes(joinDesc)) { - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: " + - "nullsafe not supported"); - } + LOG.debug("FULL OUTER MapJoin not enabled: nullsafe not supported"); return false; } @@ -421,19 +415,13 @@ private static boolean checkFullOuterMapJoinCompatible(HiveConf hiveConf, boolean isHybridHashJoin = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN); if (isVectorizationMapJoinNativeEnabled && isHybridHashJoin) { - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: " + - "Native Vector MapJoin and Hybrid Grace not supported"); - } + LOG.debug("FULL OUTER MapJoin not enabled: Native Vector MapJoin and Hybrid Grace not supported"); return false; } if (joinDesc.getResidualFilterExprs() != null && joinDesc.getResidualFilterExprs().size() != 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: " + - "non-equi joins not supported"); - } + LOG.debug("FULL OUTER MapJoin not enabled: non-equi joins not supported"); return false; } @@ -462,9 +450,7 @@ public static boolean precheckFullOuter(HiveConf hiveConf, JoinOperator joinOp) if (conds.length > 1) { // No multiple condition FULL OUTER MapJoin. - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: multiple JOIN conditions not supported"); - } + LOG.debug("FULL OUTER MapJoin not enabled: multiple JOIN conditions not supported"); return false; } @@ -488,10 +474,7 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join case NONE: { if (!isEnabled) { - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: " + - HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname + " is false"); - } + LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname); return false; } } @@ -527,9 +510,7 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join if (!isTezEngine) { // Only Tez for now. 
- if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: Only Tez engine supported"); - } + LOG.debug("FULL OUTER MapJoin not enabled: Only Tez engine supported"); return false; } @@ -541,10 +522,7 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join hiveConf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); if (!isOptimizedHashTableEnabled) { - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin not enabled: " + - HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE.varname + " is false"); - } + LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE.varname); return false; } @@ -553,9 +531,7 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join return false; } - if (LOG.isDebugEnabled()) { - LOG.debug("FULL OUTER MapJoin enabled"); - } + LOG.debug("FULL OUTER MapJoin enabled"); return true; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java index 49d915ce68..c9a6c6008c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java @@ -372,10 +372,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // where T1.A and T2.B are both partitioning columns. // However, these expressions should not be considered as valid expressions for separation. if (!hasAtleastOneSubExprWithPartColOrVirtualColWithOneTableAlias(children.get(0))) { - if (LOG.isDebugEnabled()) { - LOG.debug("Partition columns not separated for " + fd + - ", there are no expression containing partition columns in struct fields"); - } + LOG.debug( + "Partition columns not separated for {}, there are no expression containing partition columns in struct fields", + fd); return null; } @@ -383,11 +382,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // containing constants or only partition columns coming from same table. // If so, we need not perform this optimization and we should bail out. 
if (hasAllSubExprWithConstOrPartColOrVirtualColWithOneTableAlias(children.get(0))) { - if (LOG.isDebugEnabled()) { - LOG.debug("Partition columns not separated for " + fd + - ", all fields are expressions containing constants or only partition columns" - + "coming from same table"); - } + LOG.debug("Partition columns not separated for {}, all fields are expressions containing constants or" + " only partition columns coming from same table", fd); return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java index f09c8af3ac..170fd8ce42 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java @@ -108,9 +108,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, ExprNodeDesc newPredicate = generateInClause(predicate); if (newPredicate != null) { // Replace filter in current FIL with new FIL - if (LOG.isDebugEnabled()) { - LOG.debug("Generated new predicate with IN clause: " + newPredicate); - } + LOG.debug("Generated new predicate with IN clause: {}", newPredicate); filterOp.getConf().setPredicate(newPredicate); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java index 51a5ca0c53..3f03658a75 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java @@ -127,9 +127,7 @@ private void collectFileSinkDescs(Operator leaf, Set acidSinks) FileSinkDesc fsd = ((FileSinkOperator) leaf).getConf(); if(fsd.getWriteType() != AcidUtils.Operation.NOT_ACID) { if(acidSinks.add(fsd)) { - if(LOG.isDebugEnabled()) { - LOG.debug("Found Acid Sink: " + fsd.getDirName()); - } + LOG.debug("Found Acid Sink: {}", fsd.getDirName()); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java index c21dd19c25..07781df1a4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SetReducerParallelism.java @@ -67,7 +67,7 @@ public Object process(Node nd, Stack stack, if (context.visitedReduceSinks.contains(sink)) { // skip walking the children - LOG.debug("Already processed reduce sink: " + sink.getName()); + LOG.debug("Already processed reduce sink: {}", sink.getName()); return true; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java index c98417aa7b..9f26da239d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java @@ -260,15 +260,17 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, for (int order : sortOrder) { sortNullOrder.add(order == 1 ?
0 : 1); // for asc, nulls first; for desc, nulls last } - LOG.debug("Got sort order"); - for (int i : sortPositions) { - LOG.debug("sort position " + i); - } - for (int i : sortOrder) { - LOG.debug("sort order " + i); - } - for (int i : sortNullOrder) { - LOG.debug("sort null order " + i); + if (LOG.isDebugEnabled()) { + LOG.debug("Got sort order"); + for (int i : sortPositions) { + LOG.debug("sort position " + i); + } + for (int i : sortOrder) { + LOG.debug("sort order " + i); + } + for (int i : sortNullOrder) { + LOG.debug("sort null order " + i); + } } // update file sink descriptor diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TablePropertyEnrichmentOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TablePropertyEnrichmentOptimizer.java index 387d34cc44..4454c1b29e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TablePropertyEnrichmentOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/TablePropertyEnrichmentOptimizer.java @@ -106,9 +106,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Obje Table table = tsOp.getConf().getTableMetadata().getTTable(); Map originalTableParameters = getTableParameters(table); - if (LOG.isDebugEnabled()) { - LOG.debug("Original Table parameters: " + originalTableParameters); - } + LOG.debug("Original Table parameters: {}", originalTableParameters); Properties clonedTableParameters = new Properties(); clonedTableParameters.putAll(originalTableParameters); @@ -122,18 +120,16 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Obje if (context.serdeClassesUnderConsideration.contains(deserializerClassName)) { deserializer.initialize(context.conf, clonedTableParameters); - LOG.debug("SerDe init succeeded for class: " + deserializerClassName); + LOG.debug("SerDe init succeeded for class: {}", deserializerClassName); for (Map.Entry property : clonedTableParameters.entrySet()) { if (!property.getValue().equals(originalTableParameters.get(property.getKey()))) { - LOG.debug("Resolving changed parameters! key=" + property.getKey() + ", value=" + property.getValue()); + LOG.debug("Resolving changed parameters! {}", property); table.getParameters().put((String) property.getKey(), (String) property.getValue()); } } } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipping prefetch for " + deserializerClassName); - } + LOG.debug("Skipping prefetch for {}", deserializerClassName); } } catch(Throwable t) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java index e8b2c37089..8abf21bb69 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java @@ -139,7 +139,7 @@ public void onMatch(RelOptRuleCall call) { Preconditions.checkArgument(argListSets.size() > 0, "containsDistinctCall lied"); if (numCountDistinct > 1 && numCountDistinct == aggregate.getAggCallList().size()) { - LOG.debug("Trigger countDistinct rewrite. numCountDistinct is " + numCountDistinct); + LOG.debug("Trigger countDistinct rewrite. 
numCountDistinct is {}", numCountDistinct); // now positions contains all the distinct positions, i.e., $5, $4, $6 // we need to first sort them as group by set // and then get their position later, i.e., $4->1, $5->2, $6->3 @@ -148,7 +148,7 @@ public void onMatch(RelOptRuleCall call) { try { call.transformTo(convert(aggregate, argListList, newGroupSet.build())); } catch (CalciteSemanticException e) { - LOG.debug(e.toString()); + LOG.debug("Error", e); throw new RuntimeException(e); } return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java index 6b0614ccaf..0698aad55d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java @@ -120,9 +120,7 @@ public RexNode visitCall(RexCall call) { try { hiveUDF.close(); } catch (IOException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Exception in closing " + hiveUDF, e); - } + LOG.debug("Exception in closing {}", hiveUDF, e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCRexCallValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCRexCallValidator.java index 7c72bd3f03..50a1f06b87 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCRexCallValidator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCRexCallValidator.java @@ -50,7 +50,7 @@ private JdbcRexCallValidatorVisitor(SqlDialect dialect) { private boolean validRexCall(RexCall call) { if (call instanceof RexOver) { - LOG.debug("RexOver operator push down is not supported for now with the following operator:" + call); + LOG.debug("RexOver operator push down is not supported for now with the following operator: {}", call); return false; } final SqlOperator operator = call.getOperator(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java index ac7f50102e..1607d43e25 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierUtil.java @@ -68,9 +68,9 @@ protected static void fixTopOBSchema(final RelNode rootRel, if (collationInputRefs.contains(i)) { RexNode obyExpr = obChild.getChildExps().get(i); if (obyExpr instanceof RexCall) { - LOG.debug("Old RexCall : " + obyExpr); + LOG.debug("Old RexCall : {}", obyExpr); obyExpr = adjustOBSchema((RexCall) obyExpr, obChild, resultSchema); - LOG.debug("New RexCall : " + obyExpr); + LOG.debug("New RexCall : {}", obyExpr); } inputRefToCallMapBldr.put(i, obyExpr); }
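Several hunks above deliberately keep or add an explicit LOG.isDebugEnabled() guard instead of relying on placeholders alone (for example in AcidUtils.setValidWriteIdList, DbTxnManager.startHeartbeat, BaseVectorizedColumnReader and SortedDynPartitionOptimizer). That is the sensible choice when one check can protect a whole block, a loop of per-item messages, or arguments that are expensive to compute. A minimal sketch of that shape, loosely modeled on the SortedDynPartitionOptimizer hunk (sortPositions here is assumed to be an already-populated List<Integer>):

    // A single level check skips the whole block, including the loop, when DEBUG is off.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got sort order");
      for (int position : sortPositions) {
        // Parameterized form shown for consistency; plain concatenation is harmless once guarded.
        LOG.debug("sort position {}", position);
      }
    }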