diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java index fd3d457..7bd686f 100644 --- a/common/src/java/org/apache/hive/http/HttpServer.java +++ b/common/src/java/org/apache/hive/http/HttpServer.java @@ -423,6 +423,7 @@ private void initializeWebServer(final Builder b, int queueSize) { addServlet("jmx", "/jmx", JMXJsonServlet.class); addServlet("conf", "/conf", ConfServlet.class); addServlet("stacks", "/stacks", StackServlet.class); + addServlet("conflog", "/conflog", Log4j2ConfiguratorServlet.class); for (Pair> p : b.servlets) { addServlet(p.getFirst(), "/" + p.getFirst(), p.getSecond()); diff --git a/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java new file mode 100644 index 0000000..8042f21 --- /dev/null +++ b/common/src/java/org/apache/hive/http/Log4j2ConfiguratorServlet.java @@ -0,0 +1,275 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hive.http; + +import java.io.IOException; +import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.codehaus.jackson.map.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A servlet to configure log4j2. + *
+ * HTTP GET returns all loggers and their log levels in a JSON formatted response.
+ *
+ * HTTP POST is used to configure the loggers. The POST data should be in the same format as GET's response.
+ * To add new loggers or update existing ones, send an HTTP POST with logger names and levels in the following JSON format.
+ *
+ *
+ *
+ * {
+ *  "loggers": [ {
+ *    "logger" : "",
+ *    "level" : "INFO"
+ *  }, {
+ *    "logger" : "LlapIoOrc",
+ *    "level" : "WARN"
+ *  }, {
+ *    "logger" : "org.apache.zookeeper.server.NIOServerCnxn",
+ *    "level" : "WARN"
+ *  }]
+ * }
+ * 
+ * + *
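 + * A GET response uses the same structure, listing every logger known to the running log4j2
 + * configuration. As an illustrative, environment-dependent sample (actual logger names and levels
 + * will differ), a pretty-printed GET response may look like:
 + *
 + * {
 + *  "loggers": [ {
 + *    "logger" : "",
 + *    "level" : "WARN"
 + *  }, {
 + *    "logger" : "org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon",
 + *    "level" : "INFO"
 + *  }]
 + * }
 + *
 + * The empty logger name ("") refers to the root logger.
 + *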
+ * Example usage: + *
  • + * Returns all loggers with levels in JSON format: + *
 + *      curl http://hostname:port/conflog
    + *    
    + *
  • + *
  • + * Set root logger to INFO: + *
    + *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" } ] }'
 + *      http://hostname:port/conflog
    + *    
    + *
  • + *
  • + * Set logger with level: + *
    + *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
 + *      { "logger" : "LlapIoOrc", "level" : "INFO" } ] }' http://hostname:port/conflog
    + *    
    + *
  • + *
  • + * Set log level for all classes under a package: + *
    + *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [
 + *      { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostname:port/conflog
    + *    
    + *
  • + *
  • + * Set log levels for multiple loggers: + *
    + *      curl -v -H "Content-Type: application/json" -X POST -d '{ "loggers" : [ { "logger" : "", "level" : "INFO" },
    + *      { "logger" : "LlapIoOrc", "level" : "WARN" },
    + *      { "logger" : "org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon", "level" : "INFO" },
 + *      { "logger" : "org.apache.orc", "level" : "INFO" } ] }' http://hostname:port/conflog
    + *    
    + *
  • + *
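 + * As an illustrative follow-up (not part of the servlet itself), the effect of a POST can be
 + * verified by issuing a plain GET and inspecting the returned level of the logger that was
 + * changed, for example:
 + *
 + *      curl -s http://hostname:port/conflog | grep -A1 '"LlapIoOrc"'
 + *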
    + * Response Status Codes: + *
    + *
  • 200 - OK : If the POST data is valid and the request succeeds, or if a GET request succeeds.
  • + *
  • 401 - UNAUTHORIZED : If the user does not have privileges to access instrumentation servlets.
 + * Refer to the hadoop.security.instrumentation.requires.admin config for more info.
  • + *
  • 400 - BAD_REQUEST : If the POST data is not valid JSON.
  • + *
  • 500 - INTERNAL_SERVER_ERROR : If a GET request throws an IOException while generating the JSON output.
  • + */ +public class Log4j2ConfiguratorServlet extends HttpServlet { + private static final long serialVersionUID = 1L; + private static final Logger LOG = LoggerFactory.getLogger(Log4j2ConfiguratorServlet.class); + private static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods"; + private static final String ALLOWED_METHODS = "POST, GET"; + private static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"; + private static final String CONTENT_TYPE_JSON_UTF8 = "application/json; charset=utf8"; + + private transient LoggerContext context; + private transient Configuration conf; + + private static class ConfLoggers { + private List loggers; + + public ConfLoggers() { + this.loggers = new ArrayList<>(); + } + + public List getLoggers() { + return loggers; + } + + public void setLoggers(final List loggers) { + this.loggers = loggers; + } + } + + private static class ConfLogger { + private String logger; + private String level; + + // no-arg ctor required for JSON deserialization + public ConfLogger() { + this(null, null); + } + + public ConfLogger(String logger, String level) { + this.logger = logger; + this.level = level; + } + + public String getLogger() { + return logger == null ? logger : logger.trim(); + } + + public void setLogger(final String logger) { + this.logger = logger; + } + + public String getLevel() { + return level == null ? level : level.trim().toUpperCase(); + } + + public void setLevel(final String level) { + this.level = level; + } + } + + /** + * Initialize this servlet. + */ + @Override + public void init() throws ServletException { + context = (LoggerContext) LogManager.getContext(false); + conf = context.getConfiguration(); + } + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { + response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); + return; + } + + setResponseHeader(response); + + // list the loggers and their levels + listLoggers(response); + } + + private void setResponseHeader(final HttpServletResponse response) { + response.setContentType(CONTENT_TYPE_JSON_UTF8); + response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS); + response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*"); + } + + @Override + protected void doPost(final HttpServletRequest request, final HttpServletResponse response) + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { + response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); + return; + } + setResponseHeader(response); + + String dataJson = request.getReader().lines().collect(Collectors.joining()); + ObjectMapper objectMapper = new ObjectMapper(); + try { + ConfLoggers confLoggers = objectMapper.readValue(dataJson, ConfLoggers.class); + configureLogger(confLoggers); + } catch (IOException e) { + response.setStatus(HttpServletResponse.SC_BAD_REQUEST); + LOG.error("Error configuring log4j2 via /conflog endpoint.", e); + return; + } + + response.setStatus(HttpServletResponse.SC_OK); + } + + private void configureLogger(final ConfLoggers confLoggers) { + if (confLoggers != null) { + for (ConfLogger logger : confLoggers.getLoggers()) { + String loggerName = logger.getLogger(); + Level logLevel = Level.getLevel(logger.getLevel()); + if (logLevel == null) { + LOG.warn("Invalid log level: {} for logger: {}. 
Ignoring reconfiguration.", loggerName, logger.getLevel()); + continue; + } + + LoggerConfig loggerConfig = conf.getLoggerConfig(loggerName); + // if the logger name is not found, root logger is returned. We don't want to change root logger level + // since user either requested a new logger or specified invalid input. In which, we will add the logger + // that user requested. + if (!loggerName.equals(LogManager.ROOT_LOGGER_NAME) && + loggerConfig.getName().equals(LogManager.ROOT_LOGGER_NAME)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Requested logger ({}) not found. Adding as new logger with {} level", loggerName, logLevel); + } + // requested logger not found. Add the new logger with the requested level + conf.addLogger(loggerName, new LoggerConfig(loggerName, logLevel, true)); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Updating logger ({}) to {} level", loggerName, logLevel); + } + // update the log level for the specified logger + loggerConfig.setLevel(logLevel); + } + } + context.updateLoggers(conf); + } + } + + private void listLoggers(final HttpServletResponse response) throws IOException { + PrintWriter writer = null; + try { + writer = response.getWriter(); + ConfLoggers confLoggers = new ConfLoggers(); + Collection loggerConfigs = conf.getLoggers().values(); + loggerConfigs.forEach(lc -> confLoggers.getLoggers().add(new ConfLogger(lc.getName(), lc.getLevel().toString()))); + ObjectMapper objectMapper = new ObjectMapper(); + objectMapper.writerWithDefaultPrettyPrinter().writeValue(writer, confLoggers); + } catch (IOException e) { + LOG.error("Caught an exception while processing Log4j2 configuration request", e); + response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + return; + } finally { + if (writer != null) { + writer.close(); + } + } + response.setStatus(HttpServletResponse.SC_OK); + } +} diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java index 7f8c947..dd459b1 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java @@ -84,8 +84,6 @@ public class TaskExecutorService extends AbstractService implements Scheduler, SchedulerFragmentCompletingListener { private static final Logger LOG = LoggerFactory.getLogger(TaskExecutorService.class); - private static final boolean isInfoEnabled = LOG.isInfoEnabled(); - private static final boolean isDebugEnabled = LOG.isDebugEnabled(); private static final String TASK_EXECUTOR_THREAD_NAME_FORMAT = "Task-Executor-%d"; private static final String WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT = "Wait-Queue-Scheduler-%d"; private static final long PREEMPTION_KILL_GRACE_MS = 500; // 500ms @@ -294,7 +292,7 @@ public void run() { // (numSlotsAvailable can go negative, if the callback after the thread completes is delayed) boolean shouldWait = numSlotsAvailable.get() <= 0 && lastKillTimeMs == null; if (task.getTaskRunnerCallable().canFinish()) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Attempting to schedule task {}, canFinish={}. 
Current state: " + "preemptionQueueSize={}, numSlotsAvailable={}, waitQueueSize={}", task.getRequestId(), task.getTaskRunnerCallable().canFinish(), @@ -335,7 +333,7 @@ public void run() { lock.wait(PREEMPTION_KILL_GRACE_SLEEP_MS); } } else { - if (isDebugEnabled && lastKillTimeMs != null) { + if (LOG.isDebugEnabled() && lastKillTimeMs != null) { LOG.debug("Grace period ended for the previous kill; preemtping more tasks"); } if (handleScheduleAttemptedRejection(task)) { @@ -406,18 +404,18 @@ public SubmissionState schedule(TaskRunnerCallable task) { if (evictedTask == null || !evictedTask.equals(taskWrapper)) { knownTasks.put(taskWrapper.getRequestId(), taskWrapper); taskWrapper.setIsInWaitQueue(true); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("{} added to wait queue. Current wait queue size={}", task.getRequestId(), waitQueue.size()); } result = evictedTask == null ? SubmissionState.ACCEPTED : SubmissionState.EVICTED_OTHER; - if (isDebugEnabled && evictedTask != null) { + if (LOG.isDebugEnabled() && evictedTask != null) { LOG.debug("Eviction: {} {} {}", taskWrapper, result, evictedTask); } } else { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info( "wait queue full, size={}. numSlotsAvailable={}, runningFragmentCount={}. {} not added", waitQueue.size(), numSlotsAvailable.get(), runningFragmentCount.get(), task.getRequestId()); @@ -426,7 +424,7 @@ public SubmissionState schedule(TaskRunnerCallable task) { result = SubmissionState.REJECTED; - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("{} is {} as wait queue is full", taskWrapper.getRequestId(), result); } if (metrics != null) { @@ -440,7 +438,7 @@ public SubmissionState schedule(TaskRunnerCallable task) { // after some other submission has evicted it. boolean stateChanged = !taskWrapper.maybeRegisterForFinishedStateNotifications(canFinish); if (stateChanged) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Finishable state of {} updated to {} during registration for state updates", taskWrapper.getRequestId(), !canFinish); } @@ -455,12 +453,12 @@ public SubmissionState schedule(TaskRunnerCallable task) { // Register for state change notifications so that the waitQueue can be re-ordered correctly // if the fragment moves in or out of the finishable state. - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Wait Queue: {}", waitQueue); } if (evictedTask != null) { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("{} evicted from wait queue in favor of {} because of lower priority", evictedTask.getRequestId(), task.getRequestId()); } @@ -503,7 +501,7 @@ public void killFragment(String fragmentId) { // Can be null since the task may have completed meanwhile. if (taskWrapper != null) { if (taskWrapper.isInWaitQueue()) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Removing {} from waitQueue", fragmentId); } taskWrapper.setIsInWaitQueue(false); @@ -514,7 +512,7 @@ public void killFragment(String fragmentId) { } } if (taskWrapper.isInPreemptionQueue()) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Removing {} from preemptionQueue", fragmentId); } removeFromPreemptionQueue(taskWrapper); @@ -558,7 +556,7 @@ public void fragmentCompleting(String fragmentId, State state) { @VisibleForTesting /** Assumes the epic lock is already taken. 
*/ void tryScheduleUnderLock(final TaskWrapper taskWrapper) throws RejectedExecutionException { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Attempting to execute {}", taskWrapper); } ListenableFuture future = executorService.submit( @@ -572,7 +570,7 @@ void tryScheduleUnderLock(final TaskWrapper taskWrapper) throws RejectedExecutio Futures.addCallback(future, wrappedCallback, executionCompletionExecutorService); boolean canFinish = taskWrapper.getTaskRunnerCallable().canFinish(); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("{} scheduled for execution. canFinish={}", taskWrapper.getRequestId(), canFinish); } @@ -580,7 +578,7 @@ void tryScheduleUnderLock(final TaskWrapper taskWrapper) throws RejectedExecutio // to the tasks are not ready yet, the task is eligible for pre-emptable. if (enablePreemption) { if (!canFinish) { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("{} is not finishable. Adding it to pre-emption queue", taskWrapper.getRequestId()); } @@ -596,7 +594,7 @@ void tryScheduleUnderLock(final TaskWrapper taskWrapper) throws RejectedExecutio private boolean handleScheduleAttemptedRejection(TaskWrapper taskWrapper) { if (enablePreemption && taskWrapper.getTaskRunnerCallable().canFinish() && !preemptionQueue.isEmpty()) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Preemption Queue: " + preemptionQueue); } @@ -610,7 +608,7 @@ private boolean handleScheduleAttemptedRejection(TaskWrapper taskWrapper) { pRequest.getRequestId()); continue; // Try something else. } - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Invoking kill task for {} due to pre-emption to run {}", pRequest.getRequestId(), taskWrapper.getRequestId()); } @@ -767,7 +765,7 @@ private void updatePreemptionListAndNotify(EndReason reason) { if (enablePreemption) { String state = reason == null ? "FAILED" : reason.name(); boolean removed = removeFromPreemptionQueueUnlocked(taskWrapper); - if (removed && isInfoEnabled) { + if (removed && LOG.isInfoEnabled()) { TaskRunnerCallable trc = taskWrapper.getTaskRunnerCallable(); LOG.info(TaskRunnerCallable.getTaskIdentifierString(trc.getRequest(), trc.getVertexSpec(), trc.getQueryId()) + " request " + state + "! Removed from preemption list."); @@ -778,7 +776,7 @@ private void updatePreemptionListAndNotify(EndReason reason) { if (metrics != null) { metrics.setNumExecutorsAvailable(numSlotsAvailable.get()); } - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Task {} complete. 
WaitQueueSize={}, numSlotsAvailable={}, preemptionQueueSize={}", taskWrapper.getRequestId(), waitQueue.size(), numSlotsAvailable.get(), preemptionQueue.size()); @@ -831,7 +829,7 @@ private void updateFallOffStats( public void shutDown(boolean awaitTermination) { if (!isShutdown.getAndSet(true)) { if (awaitTermination) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("awaitTermination: " + awaitTermination + " shutting down task executor" + " service gracefully"); } @@ -839,7 +837,7 @@ public void shutDown(boolean awaitTermination) { shutdownExecutor(executorService); shutdownExecutor(executionCompletionExecutorService); } else { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("awaitTermination: " + awaitTermination + " shutting down task executor" + " service immediately"); } diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java index 18ce03c..f2ba5ea 100644 --- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java +++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java @@ -102,8 +102,6 @@ private static final Logger LOG = LoggerFactory.getLogger(LlapTaskCommunicator.class); - private static final boolean isInfoEnabled = LOG.isInfoEnabled(); - private static final String RESOURCE_URI_STR = "/ws/v1/applicationhistory"; private static final Joiner JOINER = Joiner.on(""); private static final Joiner PATH_JOINER = Joiner.on("/"); private final ConcurrentMap credentialMap; @@ -598,7 +596,7 @@ public void registerKnownNode(LlapNodeId nodeId) { Long old = knownNodeMap.putIfAbsent(nodeId, TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS)); if (old == null) { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Added new known node: {}", nodeId); } } @@ -609,7 +607,7 @@ public void registerPingingNode(LlapNodeId nodeId) { PingingNodeInfo ni = new PingingNodeInfo(currentTs); PingingNodeInfo old = pingedNodeMap.put(nodeId, ni); if (old == null) { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Added new pinging node: [{}]", nodeId); } } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java index bf30ef1..b151a1d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java @@ -93,9 +93,9 @@ public void process(Object row, int tag) throws HiveException { Writable writableRow = serializer.serialize(row, rowInspector); writableRow.write(buffer); if (buffer.getLength() > MAX_SIZE) { - if (isLogInfoEnabled) { - LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength()); - } + if (LOG.isInfoEnabled()) { + LOG.info("Disabling AM events. 
Buffer size too large: " + buffer.getLength()); + } hasReachedMaxSize = true; buffer = null; } @@ -103,7 +103,7 @@ public void process(Object row, int tag) throws HiveException { throw new HiveException(e); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("AppMasterEvent: " + row); } forward(row, rowInspector); @@ -130,9 +130,9 @@ public void closeOp(boolean abort) throws HiveException { InputInitializerEvent.create(vertexName, inputName, ByteBuffer.wrap(payload, 0, payload.length)); - if (isLogInfoEnabled) { - LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName - + ". Payload size = " + payload.length); + if (LOG.isInfoEnabled()) { + LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName + + ". Payload size = " + payload.length); } context.getTezProcessorContext().sendEvents(Collections.singletonList(event)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java index df1898e..07fd653 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java @@ -383,7 +383,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { joinEmitInterval = -1; } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java index c184742..a9f2218 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java @@ -188,7 +188,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { } newChildOperatorsTag[i] = toArray(childOperatorTags); } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("newChildOperatorsTag " + Arrays.toString(newChildOperatorsTag)); } @@ -214,15 +214,14 @@ protected void initializeOp(Configuration hconf) throws HiveException { @Override protected void initializeChildren(Configuration hconf) throws HiveException { state = State.INIT; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Operator " + id + " " + getName() + " initialized"); LOG.info("Initializing children of " + id + " " + getName()); } for (int i = 0; i < childOperatorsArray.length; i++) { - if (isLogInfoEnabled) { - LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " + - childOperatorsArray[i].getName() + - " " + childInputObjInspectors[i].length); + if (LOG.isInfoEnabled()) { + LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " + + childOperatorsArray[i].getName() + " " + childInputObjInspectors[i].length); } // We need to initialize those MuxOperators first because if we first // initialize other operators, the states of all parents of those MuxOperators @@ -247,10 +246,9 @@ protected void initializeChildren(Configuration hconf) throws HiveException { } } for (int i = 0; i < childOperatorsArray.length; i++) { - if (isLogInfoEnabled) { - LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " + - childOperatorsArray[i].getName() + - " " + childInputObjInspectors[i].length); + if (LOG.isInfoEnabled()) { + LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " + + 
childOperatorsArray[i].getName() + " " + childInputObjInspectors[i].length); } if (!(childOperatorsArray[i] instanceof MuxOperator)) { childOperatorsArray[i].initialize(hconf, childInputObjInspectors[i]); @@ -275,7 +273,7 @@ public void process(Object row, int tag) throws HiveException { endGroupIfNecessary(currentChildIndex); int oldTag = newTagToOldTag[tag]; - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { cntrs[tag]++; if (cntrs[tag] == nextCntrs[tag]) { LOG.debug(id + " (newTag, childIndex, oldTag)=(" + tag + ", " + currentChildIndex + ", " @@ -311,9 +309,9 @@ protected void closeOp(boolean abort) throws HiveException { int newTag = i; int oldTag = newTagToOldTag[i]; int childIndex = newTagToChildIndex[newTag]; - if (isLogInfoEnabled) { - LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", " - + oldTag + "), forwarded " + cntrs[newTag] + " rows"); + if (LOG.isInfoEnabled()) { + LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", " + + oldTag + "), forwarded " + cntrs[newTag] + " rows"); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 4d727ba..2504797 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -93,8 +93,6 @@ Serializable { public static final Logger LOG = LoggerFactory.getLogger(FileSinkOperator.class); - private static final boolean isInfoEnabled = LOG.isInfoEnabled(); - private static final boolean isDebugEnabled = LOG.isDebugEnabled(); protected transient HashMap valToPaths; protected transient int numDynParts; @@ -160,7 +158,7 @@ public FSPaths(Path specPath) { finalPaths = new Path[numFiles]; outWriters = new RecordWriter[numFiles]; updaters = new RecordUpdater[numFiles]; - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Created slots for " + numFiles); } stat = new Stat(); @@ -370,7 +368,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { serializer.initialize(unsetNestedColumnPaths(hconf), conf.getTableInfo().getProperties()); outputClass = serializer.getSerializedClass(); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Using serializer : " + serializer + " and formatter : " + hiveOutputFormat + (isCompressed ? 
" with compression" : "")); } @@ -518,13 +516,13 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { Set seenBuckets = new HashSet(); for (int idx = 0; idx < totalFiles; idx++) { if (this.getExecContext() != null && this.getExecContext().getFileId() != null) { - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("replace taskId from execContext "); } taskId = Utilities.replaceTaskIdFromFilename(taskId, this.getExecContext().getFileId()); - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("new taskId: FS " + taskId); } @@ -580,11 +578,11 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) try { if (isNativeTable) { fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null); - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]); } fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId); - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]); } } else { @@ -601,7 +599,7 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension); } - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); } @@ -741,7 +739,7 @@ public void process(Object row, int tag) throws HiveException { fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1); } - if ((++numRows == cntr) && isLogInfoEnabled) { + if ((++numRows == cntr) && LOG.isInfoEnabled()) { cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows; if (cntr < 0 || numRows < 0) { cntr = 0; @@ -776,7 +774,7 @@ public void process(Object row, int tag) throws HiveException { fpaths.updaters[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 : ++fpaths.acidFileOffset] = HiveFileFormatUtils.getAcidRecordUpdater( jc, conf.getTableInfo(), bucketNum, conf, fpaths.outPaths[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 0 :fpaths.acidFileOffset], rowInspector, reporter, 0); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Created updater for bucket number " + bucketNum + " using file " + fpaths.outPaths[conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED) ? 
0 :fpaths.acidFileOffset]); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index f8b55da..2f9066a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -748,7 +748,7 @@ public void process(Object row, int tag) throws HiveException { flushHashTable(true); hashAggr = false; } else { - if (isLogTraceEnabled) { + if (LOG.isTraceEnabled()) { LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl + " #total = " + numRowsInput + " reduction = " + 1.0 * (numRowsHashTbl / numRowsInput) + " minReduction = " @@ -946,7 +946,7 @@ private boolean shouldBeFlushed(KeyWrapper newKeys) { // Update the number of entries that can fit in the hash table numEntriesHashTable = (int) (maxHashTblMemory / (fixedRowSize + (totalVariableSize / numEntriesVarSize))); - if (isLogTraceEnabled) { + if (LOG.isTraceEnabled()) { LOG.trace("Hash Aggr: #hash table = " + numEntries + " #max in hash table = " + numEntriesHashTable); } @@ -997,14 +997,14 @@ private void flushHashTable(boolean complete) throws HiveException { } hashAggregations.clear(); hashAggregations = null; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Hash Table completed flushed"); } return; } int oldSize = hashAggregations.size(); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Hash Tbl flush: #hash table = " + oldSize); } Iterator> iter = hashAggregations @@ -1016,7 +1016,7 @@ private void flushHashTable(boolean complete) throws HiveException { iter.remove(); numDel++; if (numDel * 10 >= oldSize) { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Hash Table flushed: new size = " + hashAggregations.size()); } return; @@ -1056,10 +1056,9 @@ private void forward(Object[] keys, AggregationBuffer[] aggs) throws HiveExcepti public void flush() throws HiveException{ try { if (hashAggregations != null) { - if (isLogInfoEnabled) { - LOG.info("Begin Hash Table flush: size = " - + hashAggregations.size()); - } + if (LOG.isInfoEnabled()) { + LOG.info("Begin Hash Table flush: size = " + hashAggregations.size()); + } Iterator iter = hashAggregations.entrySet().iterator(); while (iter.hasNext()) { Map.Entry m = (Map.Entry) iter diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java index 3a366f6..f8ea701 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java @@ -275,7 +275,7 @@ private boolean hasFilter(int alias) { public void closeOp(boolean abort) throws HiveException { try { if (mapJoinTables == null) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("mapJoinTables is null"); } } else { @@ -292,7 +292,7 @@ public void closeOp(boolean abort) throws HiveException { protected void flushToFile() throws IOException, HiveException { // get tmp file URI Path tmpURI = getExecContext().getLocalWork().getTmpPath(); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Temp URI for side table: " + tmpURI); } for (byte tag = 0; tag < mapJoinTables.length; tag++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java index 0282763..a4bca45 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java @@ -113,7 +113,7 @@ public void process(Object row, int tag) throws HiveException { storage[alias].clearRows(); } } else { - if (isLogInfoEnabled && (sz == nextSz)) { + if (LOG.isInfoEnabled() && (sz == nextSz)) { // Print a message if we reached at least 1000 rows for a join operand // We won't print a message for the last join operand since the size // will never goes to joinEmitInterval. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java index 07aa2ea..56d1f44 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java @@ -173,7 +173,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { * requires changes in the Tez API with regard to finding bucket id and * also ability to schedule tasks to re-use containers that have cached the specific bucket. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("This is not bucket map join, so cache"); } @@ -314,7 +314,7 @@ public void generateMapMetaData() throws HiveException { try { loader.load(mapJoinTables, mapJoinTableSerdes); } catch (HiveException e) { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Exception loading hash tables. Clearing partially loaded hash table containers."); } @@ -554,7 +554,7 @@ public void closeOp(boolean abort) throws HiveException { } } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("spilled: " + spilled + " abort: " + abort + ". Clearing spilled partitions."); } @@ -568,7 +568,7 @@ public void closeOp(boolean abort) throws HiveException { && (this.getExecContext().getLocalWork().getInputFileChangeSensitive()) && !(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE).equals("spark") && SparkUtilities.isDedicatedCluster(hconf))) { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("MR: Clearing all map join table containers."); } clearAllTableContainers(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java index 2a46b30..d801ae7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java @@ -428,7 +428,7 @@ public void setChildren(Configuration hconf) throws Exception { for (String alias : aliases) { Operator op = conf.getAliasToWork().get(alias); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Adding alias " + alias + " to work list for file " + onefile); } @@ -469,7 +469,7 @@ private void initOperatorContext(List> children if (prev != null && !prev.equals(context.rowObjectInspector)) { throw new HiveException("Conflict on row inspector for " + context.alias); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("dump " + context.op + " " + context.rowObjectInspector.getTypeName()); } } @@ -509,7 +509,7 @@ public void cleanUpInputFileChangedOp() throws HiveException { Path fpath = getExecContext().getCurrentInputPath(); String nominalPath = getNominalPath(fpath); Map, MapOpCtx> contexts = opCtxMap.get(nominalPath); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { StringBuilder builder = new StringBuilder(); for (MapOpCtx context : contexts.values()) { if (builder.length() > 0) { @@ -517,7 +517,7 @@ public void cleanUpInputFileChangedOp() throws HiveException { } 
builder.append(context.alias); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath); } } @@ -567,7 +567,7 @@ public void process(Writable value) throws HiveException { protected final void rowsForwarded(int childrenDone, int rows) { numRows += rows; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { while (numRows >= cntr) { cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows; if (cntr < 0 || numRows < 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java index 9849243..82d0017 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java @@ -225,13 +225,13 @@ protected void initializeOp(Configuration hconf) throws HiveException { @Override protected void initializeChildren(Configuration hconf) throws HiveException { state = State.INIT; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Operator " + id + " " + getName() + " initialized"); } if (childOperators == null || childOperators.isEmpty()) { return; } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Initializing children of " + id + " " + getName()); } childOperatorsArray[0].initialize(hconf, outputObjectInspectors); @@ -242,7 +242,7 @@ protected void initializeChildren(Configuration hconf) throws HiveException { @Override public void process(Object row, int tag) throws HiveException { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { cntrs[tag]++; if (cntrs[tag] == nextCntrs[tag]) { LOG.info(id + ", tag=" + tag + ", forwarding " + cntrs[tag] + " rows"); @@ -317,7 +317,7 @@ public void processGroup(int tag) throws HiveException { @Override protected void closeOp(boolean abort) throws HiveException { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { for (int i = 0; i < numParents; i++) { LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java index 8b04cd4..ffa5f41 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java @@ -219,9 +219,6 @@ public RowSchema getSchema() { protected transient OutputCollector out; protected transient final Logger LOG = LoggerFactory.getLogger(getClass().getName()); protected transient final Logger PLOG = LoggerFactory.getLogger(Operator.class.getName()); // for simple disabling logs from all operators - protected transient final boolean isLogInfoEnabled = LOG.isInfoEnabled() && PLOG.isInfoEnabled(); - protected transient final boolean isLogDebugEnabled = LOG.isDebugEnabled() && PLOG.isDebugEnabled(); - protected transient final boolean isLogTraceEnabled = LOG.isTraceEnabled() && PLOG.isTraceEnabled(); protected transient String alias; protected transient Reporter reporter; protected String id; @@ -330,7 +327,7 @@ public final void initialize(Configuration hconf, ObjectInspector[] inputOIs) return; } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Initializing operator " + this); } @@ -369,7 +366,7 @@ public final void initialize(Configuration hconf, ObjectInspector[] inputOIs) || childOperatorsArray.length != childOperators.size()) { throw new AssertionError("Internal error during operator initialization"); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { 
LOG.debug("Initialization Done " + id + " " + getName()); } @@ -382,7 +379,7 @@ public final void initialize(Configuration hconf, ObjectInspector[] inputOIs) } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Initialization Done " + id + " " + getName() + " done is reset."); } @@ -495,13 +492,13 @@ protected void initializeOp(Configuration hconf) throws HiveException { */ protected void initializeChildren(Configuration hconf) throws HiveException { state = State.INIT; - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Operator " + id + " " + getName() + " initialized"); } if (childOperators == null || childOperators.isEmpty()) { return; } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Initializing children of " + id + " " + getName()); } for (int i = 0; i < childOperatorsArray.length; i++) { @@ -540,7 +537,7 @@ public void passExecContext(ExecMapperContext execContext) { */ protected void initialize(Configuration hconf, ObjectInspector inputOI, int parentId) throws HiveException { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Initializing child " + id + " " + getName()); } // Double the size of the array if needed @@ -581,7 +578,7 @@ public ObjectInspector getOutputObjInspector() { public abstract void process(Object row, int tag) throws HiveException; protected final void defaultStartGroup() throws HiveException { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Starting group"); } @@ -589,20 +586,20 @@ protected final void defaultStartGroup() throws HiveException { return; } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Starting group for children:"); } for (Operator op : childOperators) { op.startGroup(); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Start group Done"); } } protected final void defaultEndGroup() throws HiveException { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Ending group"); } @@ -610,14 +607,14 @@ protected final void defaultEndGroup() throws HiveException { return; } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Ending group for children:"); } for (Operator op : childOperators) { op.endGroup(); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("End group Done"); } } @@ -652,9 +649,9 @@ protected boolean allInitializedParentsAreClosed() { if(parent==null){ continue; } - if (isLogDebugEnabled) { - LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state); - } + if (LOG.isDebugEnabled()) { + LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state); + } if (!(parent.state == State.CLOSE || parent.state == State.UNINIT)) { return false; } @@ -667,7 +664,7 @@ protected boolean allInitializedParentsAreClosed() { // since it is called by its parents' main thread, so no // more than 1 thread should call this close() function. public void close(boolean abort) throws HiveException { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("close called for operator " + this); } @@ -677,7 +674,7 @@ public void close(boolean abort) throws HiveException { // check if all parents are finished if (!allInitializedParentsAreClosed()) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Not all parent operators are closed. 
Not closing."); } return; @@ -686,7 +683,7 @@ public void close(boolean abort) throws HiveException { // set state as CLOSE as long as all parents are closed // state == CLOSE doesn't mean all children are also in state CLOSE state = State.CLOSE; - if (isLogInfoEnabled) { + if (LOG.isDebugEnabled()) { LOG.info("Closing operator " + this); } @@ -705,13 +702,13 @@ public void close(boolean abort) throws HiveException { } for (Operator op : childOperators) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Closing child = " + op); } op.close(abort); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(id + " Close done"); } } catch (HiveException e) { @@ -938,7 +935,7 @@ public void preorderMap(OperatorFunc opFunc) { } public void logStats() { - if (isLogInfoEnabled && !statsMap.isEmpty()) { + if (LOG.isInfoEnabled() && !statsMap.isEmpty()) { StringBuilder sb = new StringBuilder(); for (Map.Entry e : statsMap.entrySet()) { sb.append(e.getKey()).append(":").append(e.getValue()).append(", "); @@ -1364,7 +1361,7 @@ public OpTraits getOpTraits() { } public void setOpTraits(OpTraits metaInfo) { - if (isLogDebugEnabled) { + if (LOG.isInfoEnabled()) { LOG.debug("Setting traits (" + metaInfo + ") on " + this); } if (conf != null) { @@ -1375,7 +1372,7 @@ public void setOpTraits(OpTraits metaInfo) { } public void setStatistics(Statistics stats) { - if (isLogDebugEnabled) { + if (LOG.isInfoEnabled()) { LOG.debug("Setting stats (" + stats + ") on " + this); } if (conf != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java index e3cb765..d9547b9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java @@ -100,7 +100,7 @@ private void processKeyValuePairs(Object key, Object value) if (prevPath == null) { prevPath = k.getInputPath(); reader = OrcFile.createReader(fs, k.getInputPath()); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("ORC merge file input path: " + k.getInputPath()); } } @@ -127,7 +127,7 @@ private void processKeyValuePairs(Object key, Object value) } outWriter = OrcFile.createWriter(outPath, options); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.info("ORC merge file output path: " + outPath); } } @@ -152,7 +152,7 @@ private void processKeyValuePairs(Object key, Object value) outWriter.appendStripe(buffer, 0, buffer.length, v.getStripeInformation(), v.getStripeStatistics()); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : " + v.getStripeInformation().getOffset() + " length: " + v.getStripeInformation().getLength() + " row: " diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java index e03f4b7..92741ee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java @@ -167,7 +167,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { List keys = conf.getKeyCols(); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("keys size is " + keys.size()); for (ExprNodeDesc k : keys) { LOG.debug("Key exprNodeDesc " + k.getExprString()); @@ -215,7 +215,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { tag = 
conf.getTag(); tagByte[0] = (byte) tag; skipTag = conf.getSkipTag(); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Using tag = " + tag); } @@ -310,7 +310,7 @@ public void process(Object row, int tag) throws HiveException { // TODO: this is fishy - we init object inspectors based on first tag. We // should either init for each tag, or if rowInspector doesn't really // matter, then we can create this in ctor and get rid of firstRow. - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("keys are " + conf.getOutputKeyColumnNames() + " num distributions: " + conf.getNumDistributionKeys()); } @@ -461,7 +461,7 @@ private int computeHashCode(Object row, int buckNum) throws HiveException { keyHashCode = ObjectInspectorUtils.getBucketHashCode(bucketFieldValues, partitionObjectInspectors); } int hashCode = buckNum < 0 ? keyHashCode : keyHashCode * 31 + buckNum; - if (isLogTraceEnabled) { + if (LOG.isTraceEnabled()) { LOG.trace("Going to return hash code " + hashCode); } return hashCode; @@ -508,7 +508,7 @@ protected void collect(BytesWritable keyWritable, Writable valueWritable) throws if (null != out) { numRows++; runTimeNumRows++; - if (isLogInfoEnabled) { + if (LOG.isTraceEnabled()) { if (numRows == cntr) { cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows; if (cntr < 0 || numRows < 0) { @@ -543,7 +543,7 @@ protected void closeOp(boolean abort) throws HiveException { out = null; random = null; reducerHash = null; - if (isLogInfoEnabled) { + if (LOG.isTraceEnabled()) { LOG.info(toString() + ": records written - " + numRows); } recordCounter.set(numRows); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java index 7c1e344..64aa744 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java @@ -542,7 +542,7 @@ private void setUpFetchContexts(String alias, MergeQueue mergeQueue) throws Hive BucketMatcher bucketMatcher = ReflectionUtil.newInstance(bucketMatcherCls, null); getExecContext().setFileId(bucketMatcherCxt.createFileId(currentInputPath.toString())); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("set task id: " + getExecContext().getFileId()); } @@ -768,9 +768,9 @@ public final InspectableObject getNextRow() throws IOException { } Integer current = top(); if (current == null) { - if (isLogInfoEnabled) { - LOG.info("MergeQueue forwarded " + counter + " rows"); - } + if (LOG.isInfoEnabled()) { + LOG.info("MergeQueue forwarded " + counter + " rows"); + } return null; } counter++; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java index 4767af1..e15bbba 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java @@ -300,7 +300,7 @@ boolean allowPartialConsumption() { } void displayBrokenPipeInfo() { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("The script did not consume all input data. 
This is considered as an error."); LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString() + "=true; to ignore it."); } @@ -346,7 +346,7 @@ public void process(Object row, int tag) throws HiveException { } String[] wrappedCmdArgs = addWrapper(cmdArgs); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Executing " + Arrays.asList(wrappedCmdArgs)); LOG.info("tablename=" + tableName); LOG.info("partname=" + partitionName); @@ -680,7 +680,7 @@ public void processLine(Writable line) throws HiveException { long now = System.currentTimeMillis(); // reporter is a member variable of the Operator class. if (now - lastReportTime > 60 * 1000 && reporter != null) { - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("ErrorStreamProcessor calling reporter.progress()"); } lastReportTime = now; @@ -738,7 +738,7 @@ public void run() { } proc.processLine(row); } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("StreamThread " + name + " done"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java index 94af097..9e96126 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java @@ -69,7 +69,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { eval = ExprNodeEvaluatorFactory.toCachedEvals(eval); } output = new Object[eval.length]; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("SELECT " + inputObjInspectors[0].getTypeName()); } outputObjInspector = initEvaluatorsAndReturnStruct(eval, conf.getOutputColumnNames(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java index 68477ca..17f2efb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java @@ -188,7 +188,7 @@ private void gatherStats(Object row) { values.add(o == null ? defaultPartitionName : o.toString()); } partitionSpecs = FileUtils.makePartName(conf.getPartColumns(), values); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Stats Gathering found a new partition spec = " + partitionSpecs); } } @@ -331,7 +331,7 @@ private void publishStats() throws HiveException { sc.setStatsTmpDir(conf.getTmpStatsDir()); if (!statsPublisher.connect(sc)) { // just return, stats gathering should not block the main query. - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("StatsPublishing error: cannot connect to database."); } if (isStatsReliable) { @@ -355,8 +355,8 @@ private void publishStats() throws HiveException { throw new HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg()); } } - if (isLogInfoEnabled) { - LOG.info("publishing : " + key + " : " + statsToPublish.toString()); + if (LOG.isInfoEnabled()) { + LOG.info("publishing : " + key + " : " + statsToPublish.toString()); } } if (!statsPublisher.closeConnection(sc)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java index 3df5533..99822a3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java @@ -126,7 +126,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { // to // create ObjectInspectors. 
needsTransform[p] = (inputObjInspectors[p] != outputObjInspector); - if (isLogInfoEnabled && needsTransform[p]) { + if (LOG.isInfoEnabled() && needsTransform[p]) { LOG.info("Union Operator needs to transform row from parent[" + p + "] from " + inputObjInspectors[p] + " to " + outputObjInspector); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java index 1dffff2..2e08d5c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java @@ -64,9 +64,6 @@ public class ExecReducer extends MapReduceBase implements Reducer { private static final Logger LOG = LoggerFactory.getLogger("ExecReducer"); - private static final boolean isInfoEnabled = LOG.isInfoEnabled(); - private static final boolean isTraceEnabled = LOG.isTraceEnabled(); - private static final String PLAN_KEY = "__REDUCE_PLAN__"; // Input value serde needs to be an array to support different SerDe // for different tags @@ -96,7 +93,7 @@ public void configure(JobConf job) { ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE]; ObjectInspector keyObjectInspector; - if (isInfoEnabled) { + if (LOG.isInfoEnabled()) { try { LOG.info("conf classpath = " + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs())); @@ -190,7 +187,7 @@ public void reduce(Object key, Iterator values, OutputCollector output, groupKey = new BytesWritable(); } else { // If a operator wants to do some work at the end of a group - if (isTraceEnabled) { + if (LOG.isTraceEnabled()) { LOG.trace("End Group"); } reducer.endGroup(); @@ -207,7 +204,7 @@ public void reduce(Object key, Iterator values, OutputCollector output, } groupKey.set(keyWritable.get(), 0, keyWritable.getSize()); - if (isTraceEnabled) { + if (LOG.isTraceEnabled()) { LOG.trace("Start Group"); } reducer.startGroup(); @@ -263,14 +260,14 @@ public void reduce(Object key, Iterator values, OutputCollector output, public void close() { // No row was processed - if (oc == null && isTraceEnabled) { + if (oc == null && LOG.isTraceEnabled()) { LOG.trace("Close called without any rows processed"); } try { if (groupKey != null) { // If a operator wants to do some work at the end of a group - if (isTraceEnabled) { + if (LOG.isTraceEnabled()) { LOG.trace("End Group"); } reducer.endGroup(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java index cfe1750..5589a07 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ObjectCache.java @@ -36,12 +36,11 @@ public class ObjectCache implements org.apache.hadoop.hive.ql.exec.ObjectCache { private static final Logger LOG = LoggerFactory.getLogger(ObjectCache.class.getName()); - private static final boolean isDebugEnabled = LOG.isDebugEnabled(); @Override public void release(String key) { // nothing to do - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(key + " no longer needed"); } } @@ -54,7 +53,7 @@ public void release(String key) { @Override public T retrieve(String key, Callable fn) throws HiveException { try { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Creating " + key); } return fn.call(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java index 
48dfedc..8333cf5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java @@ -58,7 +58,6 @@ private static final Logger LOG = LoggerFactory.getLogger(SparkMapRecordHandler.class); private AbstractMapOperator mo; private MapredLocalWork localWork = null; - private boolean isLogInfoEnabled = false; private ExecMapperContext execContext; @Override @@ -66,8 +65,6 @@ perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_INIT_OPERATORS); super.init(job, output, reporter); - isLogInfoEnabled = LOG.isInfoEnabled(); - try { jc = job; execContext = new ExecMapperContext(jc); @@ -134,7 +131,7 @@ public void processRow(Object key, Object value) throws IOException { // Since there is no concept of a group, we don't invoke // startGroup/endGroup for a mapper mo.process((Writable) value); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { logMemoryInfo(); } } catch (Throwable e) { @@ -182,7 +179,7 @@ public void close() { } } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { logCloseInfo(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java index 7eaad18..e473580 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java @@ -75,7 +75,6 @@ private final Deserializer[] inputValueDeserializer = new Deserializer[Byte.MAX_VALUE]; private final Object[] valueObject = new Object[Byte.MAX_VALUE]; private final List row = new ArrayList(Utilities.reduceFieldNameList.size()); - private final boolean isLogInfoEnabled = LOG.isInfoEnabled(); // TODO: move to DynamicSerDe when it's ready private Deserializer inputKeyDeserializer; @@ -338,7 +337,7 @@ public void processRow(Object key, final Object value) throws IOException { row.clear(); row.add(keyObject); row.add(valueObject[tag]); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { logMemoryInfo(); } try { @@ -390,7 +389,7 @@ public void processRow(Object key, final Object value) throws IOException { reducer.process(batch, tag); rowIdx = 0; batchBytes = 0; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { logMemoryInfo(); } } @@ -399,7 +398,7 @@ public void processRow(Object key, final Object value) throws IOException { VectorizedBatchUtil.setBatchSize(batch, rowIdx); reducer.process(batch, tag); } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { logMemoryInfo(); } } catch (Exception e) { @@ -441,7 +440,7 @@ public void close() { LOG.trace("End Group"); reducer.endGroup(); } - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { logCloseInfo(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java index ecd4ddc..b63b673 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java @@ -31,7 +31,6 @@ */ public class ColumnarSplitSizeEstimator implements SplitSizeEstimator { private static final Logger LOG = LoggerFactory.getLogger(ColumnarSplitSizeEstimator.class); - private static final boolean isDebugEnabled = LOG.isDebugEnabled(); @Override public long getEstimatedSize(InputSplit inputSplit) throws IOException { @@ -39,7 +38,7 @@ public long 
getEstimatedSize(InputSplit inputSplit) throws IOException { if (inputSplit instanceof ColumnarSplit) { colProjSize = ((ColumnarSplit) inputSplit).getColumnarProjectionSize(); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Estimated column projection size: " + colProjSize); } } else if (inputSplit instanceof HiveInputFormat.HiveInputSplit) { @@ -47,7 +46,7 @@ public long getEstimatedSize(InputSplit inputSplit) throws IOException { if (innerSplit instanceof ColumnarSplit) { colProjSize = ((ColumnarSplit) innerSplit).getColumnarProjectionSize(); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Estimated column projection size: " + colProjSize); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java index dcb985f..c5d96e5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HostAffinitySplitLocationProvider.java @@ -58,7 +58,7 @@ public HostAffinitySplitLocationProvider(List knownLocations) { @Override public String[] getLocations(InputSplit split) throws IOException { if (!(split instanceof FileSplit)) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Split: " + split + " is not a FileSplit. Using default locations"); } return split.getLocations(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java index 1ce8ee9..b26e0eb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/LlapObjectCache.java @@ -44,8 +44,6 @@ private static ExecutorService staticPool = Executors.newCachedThreadPool(); - private static final boolean isLogDebugEnabled = LOG.isDebugEnabled(); - private final Cache registry = CacheBuilder.newBuilder().softValues().build(); private final Map locks = new HashMap(); @@ -67,7 +65,7 @@ public void release(String key) { lock.lock(); try { value = (T) registry.getIfPresent(key); - if (value != null && isLogDebugEnabled) { + if (value != null && LOG.isDebugEnabled()) { LOG.debug("Found " + key + " in cache"); } return value; @@ -87,7 +85,7 @@ public void release(String key) { try { value = (T) registry.getIfPresent(key); if (value != null) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Found " + key + " in cache"); } return value; @@ -109,7 +107,7 @@ public void release(String key) { try { value = (T) registry.getIfPresent(key); if (value != null) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Found " + key + " in cache"); } return value; @@ -126,7 +124,7 @@ public void release(String key) { lock.lock(); try { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Caching new object for key: " + key); } @@ -153,7 +151,7 @@ public T call() throws Exception { @Override public void remove(String key) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Removing key: " + key); } registry.invalidate(key); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java index 106a534..d16a97a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java @@ -56,9 +56,6 @@ public 
static final Logger l4j = LoggerFactory.getLogger(RecordProcessor.class); - // used to log memory usage periodically - protected boolean isLogInfoEnabled = false; - protected boolean isLogTraceEnabled = false; protected MRTaskReporter reporter; protected PerfLogger perfLogger = SessionState.getPerfLogger(); @@ -82,9 +79,6 @@ void init(MRTaskReporter mrReporter, this.inputs = inputs; this.outputs = outputs; - isLogInfoEnabled = l4j.isInfoEnabled(); - isLogTraceEnabled = l4j.isTraceEnabled(); - checkAbortCondition(); //log classpaths diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java index f854132..7b8e7ea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java @@ -280,7 +280,7 @@ protected void determineCommonInfo(boolean isOuter) throws HiveException { outputProjection = projectionMapping.getOutputColumns(); outputTypeInfos = projectionMapping.getTypeInfos(); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { int[] orderDisplayable = new int[order.length]; for (int i = 0; i < order.length; i++) { orderDisplayable[i] = (int) order[i]; @@ -338,7 +338,7 @@ protected void determineCommonInfo(boolean isOuter) throws HiveException { * columns and new scratch columns. */ protected void setupVOutContext(List outputColumnNames) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor outputColumnNames " + outputColumnNames); } if (outputColumnNames.size() != outputProjection.length) { @@ -350,7 +350,7 @@ protected void setupVOutContext(List outputColumnNames) { int outputColumn = outputProjection[i]; vOutContext.addProjectionColumn(columnName, outputColumn); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator constructor addProjectionColumn " + i + " columnName " + columnName + " outputColumn " + outputColumn); } } @@ -423,7 +423,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { needCommonSetup = true; needHashTableSetup = true; - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { int[] currentScratchColumns = vOutContext.currentScratchColumns(); LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp currentScratchColumns " + Arrays.toString(currentScratchColumns)); @@ -515,7 +515,7 @@ private void allocateOverflowBatchColumnVector(VectorizedRowBatch overflowBatch, overflowBatch.cols[outputColumn] = VectorizedBatchUtil.createColumnVector(typeInfo); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(getLoggingPrefix() + " VectorMapJoinCommonOperator initializeOp overflowBatch outputColumn " + outputColumn + " class " + overflowBatch.cols[outputColumn].getClass().getSimpleName()); } } @@ -526,7 +526,7 @@ private void allocateOverflowBatchColumnVector(VectorizedRowBatch overflowBatch, */ protected void commonSetup(VectorizedRowBatch batch) throws HiveException { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("VectorMapJoinInnerCommonOperator commonSetup begin..."); displayBatchColumns(batch, "batch"); displayBatchColumns(overflowBatch, "overflowBatch"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java index c4d5113..1c20d93 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java @@ -544,7 +544,7 @@ protected void reloadHashTable(byte pos, int partitionId) needHashTableSetup = true; LOG.info("Created " + vectorMapJoinHashTable.getClass().getSimpleName() + " from " + this.getClass().getSimpleName()); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " reloadHashTable!"); } } @@ -553,7 +553,7 @@ protected void reloadHashTable(byte pos, int partitionId) protected void reProcessBigTable(int partitionId) throws HiveException { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " reProcessBigTable enter..."); } @@ -607,7 +607,7 @@ protected void reProcessBigTable(int partitionId) throw new HiveException(e); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " reProcessBigTable exit! " + rowCount + " row processed and " + batchCount + " batches processed"); } } @@ -680,7 +680,7 @@ public void closeOp(boolean aborted) throws HiveException { if (!aborted && overflowBatch.size > 0) { forwardOverflow(); } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("VectorMapJoinInnerLongOperator closeOp " + batchCounter + " batches processed"); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java index 43f3951..84edff2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyLongOperator.java @@ -154,7 +154,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -212,7 +212,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]); @@ -222,7 +222,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -377,7 +377,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java index 95fb0c2..7fe875b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyMultiKeyOperator.java @@ -161,7 +161,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -227,7 +227,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]); @@ -237,7 +237,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -386,7 +386,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java index 044e3e6..3869b91 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerBigOnlyStringOperator.java @@ -145,7 +145,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -205,7 +205,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishInnerBigOnlyRepeated(batch, joinResult, hashMultiSetResults[0]); @@ -215,7 +215,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -363,7 +363,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + " equalKeySeriesValueCounts " + longArrayToRangesString(equalKeySeriesValueCounts, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java index c85e1d8..b88a14d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerLongOperator.java @@ -152,7 +152,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -210,7 +210,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishInnerRepeated(batch, joinResult, hashMapResults[0]); @@ -220,7 +220,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -374,7 +374,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java index a108cd0..6dc6be8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerMultiKeyOperator.java @@ -158,7 +158,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -224,7 +224,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishInnerRepeated(batch, joinResult, hashMapResults[0]); @@ -234,7 +234,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -382,7 +382,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java index 3211d7d..64e4f9c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinInnerStringOperator.java @@ -143,7 +143,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -199,7 +199,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishInnerRepeated(batch, joinResult, hashMapResults[0]); @@ -209,7 +209,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -356,7 +356,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java index b02e6fd..2a3f8b9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiLongOperator.java @@ -154,7 +154,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -212,7 +212,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]); @@ -222,7 +222,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -370,7 +370,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + " spills " + intArrayToRangesString(spills, spillCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java index 36b8f3f..2c7c30c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiMultiKeyOperator.java @@ -160,7 +160,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -227,7 +227,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]); @@ -237,7 +237,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -382,7 +382,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + " spills " + intArrayToRangesString(spills, spillCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java index 0b3de0a..e00dfc7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinLeftSemiStringOperator.java @@ -145,7 +145,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -202,7 +202,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishLeftSemiRepeated(batch, joinResult, hashSetResults[0]); @@ -212,7 +212,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -353,7 +353,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " allMatchs " + intArrayToRangesString(allMatchs, allMatchCount) + " spills " + intArrayToRangesString(spills, spillCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java index 0e2d65a..1b1a3db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterGenerateResultOperator.java @@ -442,7 +442,7 @@ public void finishOuter(VectorizedRowBatch batch, int nonSpillCount = subtractFromInputSelected( inputSelectedInUse, inputLogicalSize, spills, spillCount, nonSpills); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("finishOuter spillCount > 0" + " nonSpills " + intArrayToRangesString(nonSpills, nonSpillCount)); } @@ -458,7 +458,7 @@ public void finishOuter(VectorizedRowBatch batch, noMatchCount = subtract(nonSpills, nonSpillCount, allMatchs, allMatchCount, noMatchs); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("finishOuter spillCount > 0" + " noMatchs " + intArrayToRangesString(noMatchs, noMatchCount)); } @@ -473,7 +473,7 @@ public void finishOuter(VectorizedRowBatch batch, noMatchCount = subtractFromInputSelected( inputSelectedInUse, inputLogicalSize, allMatchs, allMatchCount, noMatchs); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("finishOuter spillCount == 0" + " noMatchs " + intArrayToRangesString(noMatchs, noMatchCount)); } @@ -507,7 +507,7 @@ public void finishOuter(VectorizedRowBatch batch, batch.size = numSel; batch.selectedInUse = true; - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("finishOuter allMatchCount > 0" + " batch.selected " + intArrayToRangesString(batch.selected, batch.size)); } @@ -525,7 +525,7 @@ public void finishOuter(VectorizedRowBatch batch, int mergeCount = sortMerge( noMatchs, noMatchCount, batch.selected, batch.size, merged); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("finishOuter noMatchCount > 0 && batch.size > 0" + " merged " + intArrayToRangesString(merged, mergeCount)); } @@ -543,7 +543,7 @@ public void finishOuter(VectorizedRowBatch batch, batch.size = noMatchCount; batch.selectedInUse = true; - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("finishOuter noMatchCount > 0 && batch.size == 0" + " batch.selected " + intArrayToRangesString(batch.selected, batch.size)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java index 72309e8..cb0ec96 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterLongOperator.java @@ -144,7 +144,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" 
+ batchCounter + " empty"); } return; @@ -173,7 +173,7 @@ public void process(Object row, int tag) throws HiveException { ve.evaluate(batch); } someRowsFilteredOut = (batch.size != inputLogicalSize); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { if (batch.selectedInUse) { if (inputSelectedInUse) { LOG.debug(CLASS_NAME + @@ -246,7 +246,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut, @@ -257,7 +257,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -427,7 +427,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java index a4fc7d3..4d9c302 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterMultiKeyOperator.java @@ -149,7 +149,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -178,7 +178,7 @@ public void process(Object row, int tag) throws HiveException { ve.evaluate(batch); } someRowsFilteredOut = (batch.size != inputLogicalSize); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { if (batch.selectedInUse) { if (inputSelectedInUse) { LOG.debug(CLASS_NAME + @@ -265,7 +265,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut, @@ -276,7 +276,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -445,7 +445,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java index 6e7e5cb..f1a5c2e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinOuterStringOperator.java @@ -134,7 +134,7 @@ public void process(Object row, int tag) throws HiveException { final int inputLogicalSize = batch.size; if (inputLogicalSize == 0) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " empty"); } return; @@ -163,7 +163,7 @@ public void process(Object row, int tag) throws HiveException { ve.evaluate(batch); } someRowsFilteredOut = (batch.size != inputLogicalSize); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { if (batch.selectedInUse) { if (inputSelectedInUse) { LOG.debug(CLASS_NAME + @@ -234,7 +234,7 @@ public void process(Object row, int tag) throws HiveException { * Common repeated join result processing. */ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " repeated joinResult " + joinResult.name()); } finishOuterRepeated(batch, joinResult, hashMapResults[0], someRowsFilteredOut, @@ -245,7 +245,7 @@ public void process(Object row, int tag) throws HiveException { * NOT Repeating. 
*/ - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " non-repeated"); } @@ -413,7 +413,7 @@ public void process(Object row, int tag) throws HiveException { } } - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug(CLASS_NAME + " batch #" + batchCounter + " allMatchs " + intArrayToRangesString(allMatchs,allMatchCount) + " equalKeySeriesHashMapResultIndices " + intArrayToRangesString(equalKeySeriesHashMapResultIndices, equalKeySeriesCount) + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java index 10bc902..89087e1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashTable.java @@ -40,8 +40,6 @@ private static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastBytesHashTable.class); - private final boolean isLogDebugEnabled = LOG.isDebugEnabled(); - protected VectorMapJoinFastKeyStore keyStore; protected BytesWritable testKeyBytesWritable; @@ -90,7 +88,7 @@ public void add(byte[] keyBytes, int keyStart, int keyLength, BytesWritable curr } if (largestNumberOfSteps < i) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Probed " + i + " slots (the longest so far) to find space"); } largestNumberOfSteps = i; @@ -144,7 +142,7 @@ private void expandAndRehash() { } if (newLargestNumberOfSteps < i) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Probed " + i + " slots (the longest so far) to find space"); } newLargestNumberOfSteps = i; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java index 54e667c..2767414 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastLongHashTable.java @@ -47,12 +47,8 @@ public static final Logger LOG = LoggerFactory.getLogger(VectorMapJoinFastLongHashTable.class); - private transient final boolean isLogDebugEnabled = LOG.isDebugEnabled(); - private final HashTableKeyType hashTableKeyType; - private final boolean isOuterJoin; - private final BinarySortableDeserializeRead keyBinarySortableDeserializeRead; private final boolean useMinMax; @@ -131,7 +127,7 @@ public void add(long key, BytesWritable currentValue) { } if (largestNumberOfSteps < i) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Probed " + i + " slots (the longest so far) to find space"); } largestNumberOfSteps = i; @@ -195,7 +191,7 @@ private void expandAndRehash() { } if (newLargestNumberOfSteps < i) { - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Probed " + i + " slots (the longest so far) to find space"); } newLargestNumberOfSteps = i; @@ -269,7 +265,6 @@ public VectorMapJoinFastLongHashTable( boolean minMaxEnabled, boolean isOuterJoin, HashTableKeyType hashTableKeyType, int initialCapacity, float loadFactor, int writeBuffersSize, long estimatedKeyCount) { super(initialCapacity, loadFactor, writeBuffersSize, estimatedKeyCount); - this.isOuterJoin = isOuterJoin; this.hashTableKeyType = hashTableKeyType; PrimitiveTypeInfo[] 
primitiveTypeInfos = { hashTableKeyType.getPrimitiveTypeInfo() }; keyBinarySortableDeserializeRead = diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java index fc5aea5..d5e49cd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/reducesink/VectorReduceSinkCommonOperator.java @@ -242,7 +242,7 @@ public VectorReduceSinkCommonOperator(CompilationOpContext ctx, protected void initializeOp(Configuration hconf) throws HiveException { super.initializeOp(hconf); - if (isLogDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("useUniformHash " + vectorReduceSinkInfo.getUseUniformHash()); LOG.debug("reduceSinkKeyColumnMap " + @@ -300,7 +300,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { reduceSkipTag = conf.getSkipTag(); reduceTagByte = (byte) conf.getTag(); - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info("Using tag = " + (int) reduceTagByte); } @@ -337,7 +337,7 @@ protected void collect(BytesWritable keyWritable, Writable valueWritable) throws // forward is not called if (null != out) { numRows++; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { if (numRows == cntr) { cntr = logEveryNRows == 0 ? cntr * 10 : numRows + logEveryNRows; if (cntr < 0 || numRows < 0) { @@ -362,7 +362,7 @@ protected void collect(BytesWritable keyWritable, Writable valueWritable) throws protected void closeOp(boolean abort) throws HiveException { super.closeOp(abort); out = null; - if (isLogInfoEnabled) { + if (LOG.isInfoEnabled()) { LOG.info(toString() + ": records written - " + numRows); } recordCounter.set(numRows); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java index 9299306..58ea8f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java @@ -54,7 +54,6 @@ /** Metastore-based footer cache storing serialized footers. Also has a local cache. 
*/ public class ExternalCache implements FooterCache { private static final Logger LOG = LoggerFactory.getLogger(ExternalCache.class); - private static boolean isDebugEnabled = LOG.isDebugEnabled(); private final LocalCache localCache; private final ExternalFooterCachesByConf externalCacheSrc; @@ -194,7 +193,7 @@ private void processPpdResult(MetadataPpdResult mpr, HdfsFileStatusWithId file, Long fileId = file.getFileId(); if (fileId == null) { if (!isInTest) { - if (!isWarnLogged || isDebugEnabled) { + if (!isWarnLogged || LOG.isDebugEnabled()) { LOG.warn("Not using metastore cache because fileId is missing: " + fs.getPath()); isWarnLogged = true; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 8fb7211..0ef7c75 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -158,7 +158,6 @@ } private static final Logger LOG = LoggerFactory.getLogger(OrcInputFormat.class); - private static final boolean isDebugEnabled = LOG.isDebugEnabled(); static final HadoopShims SHIMS = ShimLoader.getHadoopShims(); private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024; @@ -1682,7 +1681,7 @@ private long computeProjectionSize(List fileTypes, allowSyntheticFileIds); for (SplitStrategy splitStrategy : splitStrategies) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Split strategy: {}", splitStrategy); } @@ -1725,7 +1724,7 @@ private long computeProjectionSize(List fileTypes, + context.numFilesCounter.get()); } - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { for (OrcSplit split : splits) { LOG.debug(split + " projected_columns_uncompressed_size: " + split.getColumnarProjectionSize()); @@ -1795,7 +1794,7 @@ private static void scheduleSplits(ETLSplitStrategy splitStrategy, Context conte @Override public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("getSplits started"); } Configuration conf = job; @@ -1805,7 +1804,7 @@ private static void scheduleSplits(ETLSplitStrategy splitStrategy, Context conte } List result = generateSplitsInfo(conf, new Context(conf, numSplits, createExternalCaches())); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("getSplits finished"); } return result.toArray(new InputSplit[result.size()]); @@ -2100,7 +2099,7 @@ static int getBucketForSplit(Configuration conf, OrcSplit orcSplit) { for (int i = 0; i < includeStripe.length; ++i) { includeStripe[i] = (i >= stripeStats.size()) || isStripeSatisfyPredicate(stripeStats.get(i), sarg, filterColumns, evolution); - if (isDebugEnabled && !includeStripe[i]) { + if (LOG.isDebugEnabled() && !includeStripe[i]) { LOG.debug("Eliminating ORC stripe-" + i + " of file '" + filePath + "' as it did not satisfy predicate condition."); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 6844713..fc6adaf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -103,8 +103,6 @@ public class StatsRulesProcFactory { private static final Logger LOG = LoggerFactory.getLogger(StatsRulesProcFactory.class.getName()); - private static 
final boolean isDebugEnabled = LOG.isDebugEnabled(); - /** * Collect basic statistics like number of rows, data size and column level statistics from the @@ -130,7 +128,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop); tsop.setStatistics(stats.clone()); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName() + "): " + stats.extendedToString()); } @@ -194,14 +192,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } sop.setStatistics(stats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + sop.toString() + ": " + stats.extendedToString()); } } else { if (parentStats != null) { sop.setStatistics(parentStats.clone()); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[1] STATS-" + sop.toString() + ": " + parentStats.extendedToString()); } } @@ -291,7 +289,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, updateStats(st, newNumRows, true, fop); } - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + fop.toString() + ": " + st.extendedToString()); } } else { @@ -301,7 +299,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, updateStats(st, newNumRows, false, fop); } - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[1] STATS-" + fop.toString() + ": " + st.extendedToString()); } } @@ -321,7 +319,7 @@ private long evaluateExpression(Statistics stats, ExprNodeDesc pred, Statistics andStats = null; if (stats.getNumRows() <= 1 || stats.getDataSize() <= 0) { - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Estimating row count for " + pred + " Original num rows: " + stats.getNumRows() + " Original data size: " + stats.getDataSize() + " New num rows: 1"); } @@ -403,7 +401,7 @@ private long evaluateExpression(Statistics stats, ExprNodeDesc pred, } } - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("Estimating row count for " + pred + " Original num rows: " + stats.getNumRows() + " New num rows: " + newNumRows); } @@ -1057,7 +1055,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, parallelism = (int) Math.ceil((double) inputSize / maxSplitSize); } - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("STATS-" + gop.toString() + ": inputSize: " + inputSize + " maxSplitSize: " + maxSplitSize + " parallelism: " + parallelism + " containsGroupingSet: " + containsGroupingSet + " sizeOfGroupingSet: " + sizeOfGroupingSet); @@ -1070,7 +1068,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // check if map side aggregation is possible or not based on column stats hashAgg = checkMapSideAggregation(gop, colStats, conf); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("STATS-" + gop.toString() + " hashAgg: " + hashAgg); } @@ -1109,7 +1107,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (ndvProduct == 0) { ndvProduct = parentNumRows / 2; - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("STATS-" + gop.toString() + ": ndvProduct became 0 as some column does not" + " have stats. 
ndvProduct changed to: " + ndvProduct); } @@ -1124,14 +1122,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, (StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet)) / 2, StatsUtils.safeMult(StatsUtils.safeMult(ndvProduct, parallelism), sizeOfGroupingSet)); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 4] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } else { // Case 3: column stats, hash aggregation, NO grouping sets cardinality = Math.min(parentNumRows / 2, StatsUtils.safeMult(ndvProduct, parallelism)); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 3] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } @@ -1140,14 +1138,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Case 6: column stats, NO hash aggregation, grouping sets cardinality = StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 6] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } else { // Case 5: column stats, NO hash aggregation, NO grouping sets cardinality = parentNumRows; - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 5] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } @@ -1166,14 +1164,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, sizeOfGroupingSet = mGop.getConf().getListGroupingSets().size(); cardinality = Math.min(parentNumRows, StatsUtils.safeMult(ndvProduct, sizeOfGroupingSet)); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 8] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } else { // Case 9: column stats, NO grouping sets cardinality = Math.min(parentNumRows, ndvProduct); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 9] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } @@ -1196,14 +1194,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Case 2: NO column stats, NO hash aggregation, grouping sets cardinality = StatsUtils.safeMult(parentNumRows, sizeOfGroupingSet); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 2] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } else { // Case 1: NO column stats, NO hash aggregation, NO grouping sets cardinality = parentNumRows; - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 1] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } @@ -1212,7 +1210,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Case 7: NO column stats cardinality = parentNumRows / 2; - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[Case 7] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } @@ -1263,7 +1261,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, gop.setStatistics(stats); - if (isDebugEnabled && stats != null) { + if (LOG.isDebugEnabled() && stats != null) { LOG.debug("[0] STATS-" + gop.toString() + ": " + stats.extendedToString()); } } catch (CloneNotSupportedException e) { @@ -1548,7 +1546,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, updateColStats(conf, stats, newRowCount, jop, rowCountParents); jop.setStatistics(stats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + jop.toString() + ": " + stats.extendedToString()); } } else { @@ -1606,7 +1604,7 @@ public Object process(Node nd, Stack 
stack, NodeProcessorCtx procCtx, wcStats.setDataSize(newDataSize); jop.setStatistics(wcStats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[1] STATS-" + jop.toString() + ": " + wcStats.extendedToString()); } } @@ -1641,7 +1639,7 @@ private long inferPKFKRelationship(int numAttr, List parentIds = Lists.newArrayList(); // print primary key containing parents @@ -2060,7 +2058,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } lop.setStatistics(stats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + lop.toString() + ": " + stats.extendedToString()); } } else { @@ -2079,7 +2077,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } lop.setStatistics(wcStats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[1] STATS-" + lop.toString() + ": " + wcStats.extendedToString()); } } @@ -2147,7 +2145,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, outStats.setColumnStats(colStats); } rop.setStatistics(outStats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + rop.toString() + ": " + outStats.extendedToString()); } } catch (CloneNotSupportedException e) { @@ -2192,7 +2190,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, stats.addToColumnStats(colStats); op.getConf().setStatistics(stats); - if (isDebugEnabled) { + if (LOG.isDebugEnabled()) { LOG.debug("[0] STATS-" + op.toString() + ": " + stats.extendedToString()); } } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java index aa19d09..d43aa35 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyBinary.java @@ -27,8 +27,7 @@ public class LazyBinary extends LazyPrimitive { private static final Logger LOG = LoggerFactory.getLogger(LazyBinary.class); - private static final boolean DEBUG_LOG_ENABLED = LOG.isDebugEnabled(); - + public LazyBinary(LazyBinaryObjectInspector oi) { super(oi); data = new BytesWritable(); @@ -56,7 +55,7 @@ public void init(ByteArrayRef bytes, int start, int length) { // todo this should be configured in serde public static byte[] decodeIfNeeded(byte[] recv) { boolean arrayByteBase64 = Base64.isArrayByteBase64(recv); - if (DEBUG_LOG_ENABLED && arrayByteBase64) { + if (LOG.isDebugEnabled() && arrayByteBase64) { LOG.debug("Data only contains Base64 alphabets only so try to decode the data."); } return arrayByteBase64 ? Base64.decodeBase64(recv) : recv;
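
Reviewer note (not part of the patch): every hunk above applies the same transformation, so a compact illustration of the before/after shape may help. The deleted fields such as isLogDebugEnabled, isDebugEnabled and isLogInfoEnabled captured the logger's level once, at class-load or construction time; after that, changing the level of the underlying logger on a running process had no effect on the guarded statements. Calling LOG.isDebugEnabled() / LOG.isInfoEnabled() at the call site instead re-reads the current level on every check. The sketch below restates that pattern outside the Hive code base; the class name, logger and messages are illustrative only and do not come from the patch.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoggingExample {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingExample.class);

  // Pattern removed throughout this patch: the flag is evaluated exactly once,
  // so a later change to the logger's level is never observed here.
  // private static final boolean isDebugEnabled = LOG.isDebugEnabled();

  public void process(String key) {
    // Pattern introduced by this patch: query the logger at the call site so a
    // runtime reconfiguration of the level takes effect immediately.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Processing key: " + key);
    }

    // SLF4J parameterized logging defers message construction as well; an
    // explicit guard is still useful when building the arguments is expensive.
    LOG.debug("Processing key: {}", key);
  }
}

The per-call isDebugEnabled()/isInfoEnabled() check is a cheap level comparison, so dropping the cached flags trades negligible per-row overhead for correct behaviour when log levels are changed at runtime.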