diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
index b9be486..caa4528 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
@@ -81,7 +81,9 @@ public void processOp(Object row, int tag) throws HiveException {
       Writable writableRow = serializer.serialize(row, rowInspector);
       writableRow.write(buffer);
       if (buffer.getLength() > MAX_SIZE) {
-        LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength());
+        if (isLogInfoEnabled) {
+          LOG.info("Disabling AM events. Buffer size too large: " + buffer.getLength());
+        }
         hasReachedMaxSize = true;
         buffer = null;
       }
@@ -89,7 +91,7 @@ public void processOp(Object row, int tag) throws HiveException {
       throw new HiveException(e);
     }

-    if (LOG.isDebugEnabled()) {
+    if (isLogDebugEnabled) {
       LOG.debug("AppMasterEvent: " + row);
     }
     forward(row, rowInspector);
@@ -116,8 +118,10 @@ public void closeOp(boolean abort) throws HiveException {
           InputInitializerEvent.create(vertexName, inputName,
               ByteBuffer.wrap(payload, 0, payload.length));

-      LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
-          + ". Payload size = " + payload.length);
+      if (isLogInfoEnabled) {
+        LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
+            + ". Payload size = " + payload.length);
+      }
       context.getTezProcessorContext().sendEvents(Collections.singletonList(event));
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index 8b3489f..1d5ebb1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -316,7 +316,9 @@ protected void initializeOp(Configuration hconf) throws HiveException {
       }
     }

-    LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz);
+    if (isLogInfoEnabled) {
+      LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " + totalSz);
+    }
   }

   transient boolean newGroupStarted = false;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
index 772dda6..48ce67a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
@@ -176,7 +176,9 @@ protected void initializeOp(Configuration hconf) throws HiveException {
       }
       newChildOperatorsTag[i] = toArray(childOperatorTags);
     }
-    LOG.info("newChildOperatorsTag " + newChildOperatorsTag);
+    if (isLogInfoEnabled) {
+      LOG.info("newChildOperatorsTag " + newChildOperatorsTag);
+    }
     initializeChildren(hconf);
   }
@@ -200,12 +202,16 @@ protected void initializeOp(Configuration hconf) throws HiveException {
   @Override
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    LOG.info("Operator " + id + " " + getName() + " initialized");
-    LOG.info("Initializing children of " + id + " " + getName());
+    if (isLogInfoEnabled) {
+      LOG.info("Operator " + id + " " + getName() + " initialized");
+      LOG.info("Initializing children of " + id + " " + getName());
+    }
     for (int i = 0; i < childOperatorsArray.length; i++) {
-      LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
-          childOperatorsArray[i].getName() +
-          " " + childInputObjInspectors[i].length);
+      if (isLogInfoEnabled) {
+        LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
+            childOperatorsArray[i].getName() +
+            " " + childInputObjInspectors[i].length);
+      }
       // We need to initialize those MuxOperators first because if we first
       // initialize other operators, the states of all parents of those MuxOperators
       // are INIT (including this DemuxOperator),
@@ -229,9 +235,11 @@ protected void initializeChildren(Configuration hconf) throws HiveException {
       }
     }
     for (int i = 0; i < childOperatorsArray.length; i++) {
-      LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
-          childOperatorsArray[i].getName() +
-          " " + childInputObjInspectors[i].length);
+      if (isLogInfoEnabled) {
+        LOG.info("Initializing child " + i + " " + childOperatorsArray[i].getIdentifier() + " " +
+            childOperatorsArray[i].getName() +
+            " " + childInputObjInspectors[i].length);
+      }
       if (!(childOperatorsArray[i] instanceof MuxOperator)) {
         childOperatorsArray[i].initialize(hconf, childInputObjInspectors[i]);
       } else {
@@ -255,10 +263,10 @@ public void processOp(Object row, int tag) throws HiveException {
       endGroupIfNecessary(currentChildIndex);

       int oldTag = newTagToOldTag[tag];
-      if (isLogInfoEnabled) {
+      if (isLogDebugEnabled) {
         cntrs[tag]++;
         if (cntrs[tag] == nextCntrs[tag]) {
-          LOG.info(id + " (newTag, childIndex, oldTag)=(" + tag + ", " + currentChildIndex + ", "
+          LOG.debug(id + " (newTag, childIndex, oldTag)=(" + tag + ", " + currentChildIndex + ", "
               + oldTag + "), forwarding " + cntrs[tag] + " rows");
           nextCntrs[tag] = getNextCntr(cntrs[tag]);
         }
@@ -291,8 +299,10 @@ protected void closeOp(boolean abort) throws HiveException {
         int newTag = i;
         int oldTag = newTagToOldTag[i];
         int childIndex = newTagToChildIndex[newTag];
-        LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", "
-            + oldTag + "), forwarded " + cntrs[newTag] + " rows");
+        if (isLogInfoEnabled) {
+          LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " + childIndex + ", "
+              + oldTag + "), forwarded " + cntrs[newTag] + " rows");
+        }
       }
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 196f2c0..810aaa6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -391,7 +391,7 @@ protected void initializeOp(Configuration hconf) throws HiveException {
         valToPaths.put("", fsp); // special entry for non-DP case
       }
     }
-
+
     final StoragePolicyValue tmpStorage = StoragePolicyValue.lookup(HiveConf
         .getVar(hconf, HIVE_TEMPORARY_TABLE_STORAGE));
     if (isTemporary && fsp != null
@@ -702,7 +702,7 @@ public void processOp(Object row, int tag) throws HiveException {
         fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1);
       }

-      if (++numRows == cntr) {
+      if ((++numRows == cntr) && isLogInfoEnabled) {
         cntr *= 10;
         LOG.info(toString() + ": records written - " + numRows);
       }
@@ -967,7 +967,7 @@ private String getDynPartDirectory(List row, List dpColNames) {
   public void closeOp(boolean abort) throws HiveException {

     row_count.set(numRows);
-    LOG.info(toString() + ": records written - " + numRows);
+    LOG.info(toString() + ": records written - " + numRows);

     if (!bDynParts && !filesCreated) {
       createBucketFiles(fsp);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index 7918194..dfee3a5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -961,12 +961,16 @@ private void flushHashTable(boolean complete) throws HiveException {
       }
       hashAggregations.clear();
       hashAggregations = null;
-      LOG.info("Hash Table completed flushed");
+      if (isLogInfoEnabled) {
+        LOG.info("Hash Table completed flushed");
+      }
       return;
     }

     int oldSize = hashAggregations.size();
-    LOG.info("Hash Tbl flush: #hash table = " + oldSize);
+    if (isLogInfoEnabled) {
+      LOG.info("Hash Tbl flush: #hash table = " + oldSize);
+    }
     Iterator> iter = hashAggregations
         .entrySet().iterator();
     int numDel = 0;
@@ -976,7 +980,9 @@ private void flushHashTable(boolean complete) throws HiveException {
         iter.remove();
         numDel++;
         if (numDel * 10 >= oldSize) {
-          LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
+          if (isLogInfoEnabled) {
+            LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
+          }
           return;
         }
       }
@@ -1015,8 +1021,10 @@ private void forward(Object[] keys, AggregationBuffer[] aggs) throws HiveExcepti
   public void flush() throws HiveException{
     try {
       if (hashAggregations != null) {
-        LOG.info("Begin Hash Table flush: size = "
-            + hashAggregations.size());
+        if (isLogInfoEnabled) {
+          LOG.info("Begin Hash Table flush: size = "
+              + hashAggregations.size());
+        }
         Iterator iter = hashAggregations.entrySet().iterator();
         while (iter.hasNext()) {
           Map.Entry m = (Map.Entry) iter
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
index 64c1552..80ce96a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
@@ -73,7 +73,7 @@
   /**
    * The filters for join
    */
-  private transient List[] joinFilters;
+  private transient List[] joinFilters;

   private transient int[][] filterMaps;

@@ -103,7 +103,7 @@
   protected transient LogHelper console;
   private long hashTableScale;
   private MapJoinMemoryExhaustionHandler memoryExhaustionHandler;
-
+
   public HashTableSinkOperator() {
   }

@@ -265,7 +265,9 @@ private boolean hasFilter(int alias) {
   public void closeOp(boolean abort) throws HiveException {
     try {
       if (mapJoinTables == null) {
-        LOG.debug("mapJoinTables is null");
+        if (isLogDebugEnabled) {
+          LOG.debug("mapJoinTables is null");
+        }
       } else {
         flushToFile();
       }
@@ -280,7 +282,9 @@ public void closeOp(boolean abort) throws HiveException {
   protected void flushToFile() throws IOException, HiveException {
     // get tmp file URI
     Path tmpURI = getExecContext().getLocalWork().getTmpPath();
-    LOG.info("Temp URI for side table: " + tmpURI);
+    if (isLogInfoEnabled) {
+      LOG.info("Temp URI for side table: " + tmpURI);
+    }
     for (byte tag = 0; tag < mapJoinTables.length; tag++) {
       // get the key and value
       MapJoinPersistableTableContainer tableContainer = mapJoinTables[tag];
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
index 878df75..f49cb2a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
@@ -104,12 +104,12 @@ public void processOp(Object row, int tag) throws HiveException {
           storage[alias].clearRows();
         }
       } else {
-        if (sz == nextSz) {
+        if (isLogInfoEnabled && (sz == nextSz)) {
           // Print a message if we reached at least 1000 rows for a join operand
           // We won't print a message for the last join operand since the size
           // will never goes to joinEmitInterval.
-          LOG.info("table " + alias + " has " + sz + " rows for join key "
-              + keyObject);
+          LOG.info("table " + alias + " has " + sz + " rows for join key "
+              + keyObject);
           nextSz = getNextSize(nextSz);
         }
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index ff42591..c6bfd03 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -110,10 +110,14 @@ protected void initializeOp(Configuration hconf) throws HiveException {
       mapJoinTables = (MapJoinTableContainer[]) cache.retrieve(tableKey);
       mapJoinTableSerdes = (MapJoinTableContainerSerDe[]) cache.retrieve(serdeKey);
       hashTblInitedOnce = true;
-      LOG.info("Try to retrieve from cache");
+      if (isLogInfoEnabled) {
+        LOG.info("Try to retrieve from cache");
+      }

       if (mapJoinTables == null || mapJoinTableSerdes == null) {
-        LOG.info("Did not find tables in cache");
+        if (isLogInfoEnabled) {
+          LOG.info("Did not find tables in cache");
+        }
         mapJoinTables = new MapJoinTableContainer[tagLen];
         mapJoinTableSerdes = new MapJoinTableContainerSerDe[tagLen];
         hashTblInitedOnce = false;
@@ -200,7 +204,9 @@ private void loadHashTable() throws HiveException {
        * requires changes in the Tez API with regard to finding bucket id and
        * also ability to schedule tasks to re-use containers that have cached the specific bucket.
        */
-      LOG.info("This is not bucket map join, so cache");
+      if (isLogInfoEnabled) {
+        LOG.info("This is not bucket map join, so cache");
+      }
       cache.cache(tableKey, mapJoinTables);
       cache.cache(serdeKey, mapJoinTableSerdes);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index 851ea1b..5c211c7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -450,7 +450,7 @@ public void cleanUpInputFileChangedOp() throws HiveException {
         builder.append(context.alias);
       }
       if (isLogDebugEnabled) {
-        LOG.info("Processing alias(es) " + builder.toString() + " for file " + fpath);
+        LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath);
       }
     }
     // Add alias, table name, and partitions to hadoop conf so that their
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
index b10a7fa..5969050 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
@@ -214,11 +214,15 @@ protected void initializeOp(Configuration hconf) throws HiveException {
   @Override
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    LOG.info("Operator " + id + " " + getName() + " initialized");
+    if (isLogInfoEnabled) {
+      LOG.info("Operator " + id + " " + getName() + " initialized");
+    }
     if (childOperators == null || childOperators.isEmpty()) {
       return;
     }
-    LOG.info("Initializing children of " + id + " " + getName());
+    if (isLogInfoEnabled) {
+      LOG.info("Initializing children of " + id + " " + getName());
+    }
     childOperatorsArray[0].initialize(hconf, outputObjectInspectors);
     if (reporter != null) {
       childOperatorsArray[0].setReporter(reporter);
@@ -302,8 +306,10 @@ public void processGroup(int tag) throws HiveException {

   @Override
   protected void closeOp(boolean abort) throws HiveException {
-    for (int i = 0; i < numParents; i++) {
-      LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows");
+    if (isLogInfoEnabled) {
+      for (int i = 0; i < numParents; i++) {
+        LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows");
+      }
     }
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index d2b5c05..c491a47 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -343,7 +343,9 @@ public void initialize(Configuration hconf, ObjectInspector[] inputOIs)
       return;
     }

-    LOG.info("Initializing Self " + this);
+    if (isLogInfoEnabled) {
+      LOG.info("Initializing Self " + this);
+    }

     if (inputOIs != null) {
       inputObjInspectors = inputOIs;
@@ -391,7 +393,9 @@ public void initialize(Configuration hconf, ObjectInspector[] inputOIs)
           "Internal Hive error during operator initialization.");
     }

-    LOG.info("Initialization Done " + id + " " + getName());
+    if (isLogInfoEnabled) {
+      LOG.info("Initialization Done " + id + " " + getName());
+    }
   }

   public void initializeLocalWork(Configuration hconf) throws HiveException {
@@ -416,11 +420,15 @@ protected void initializeOp(Configuration hconf) throws HiveException {
    */
   protected void initializeChildren(Configuration hconf) throws HiveException {
     state = State.INIT;
-    LOG.info("Operator " + id + " " + getName() + " initialized");
+    if (isLogInfoEnabled) {
+      LOG.info("Operator " + id + " " + getName() + " initialized");
+    }
     if (childOperators == null || childOperators.isEmpty()) {
       return;
     }
-    LOG.info("Initializing children of " + id + " " + getName());
+    if (isLogInfoEnabled) {
+      LOG.info("Initializing children of " + id + " " + getName());
+    }
     for (int i = 0; i < childOperatorsArray.length; i++) {
       childOperatorsArray[i].initialize(hconf, outputObjInspector, childOperatorsTag[i]);
@@ -455,7 +463,9 @@ public void passExecContext(ExecMapperContext execContext) {
    */
   protected void initialize(Configuration hconf, ObjectInspector inputOI,
       int parentId) throws HiveException {
-    LOG.info("Initializing child " + id + " " + getName());
+    if (isLogInfoEnabled) {
+      LOG.info("Initializing child " + id + " " + getName());
+    }
     // Double the size of the array if needed
     if (parentId >= inputObjInspectors.length) {
       int newLength = inputObjInspectors.length * 2;
@@ -565,7 +575,9 @@ protected boolean allInitializedParentsAreClosed() {
         if(parent==null){
           continue;
         }
-        LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state);
+        if (isLogDebugEnabled) {
+          LOG.debug("allInitializedParentsAreClosed? parent.state = " + parent.state);
+        }
         if (!(parent.state == State.CLOSE || parent.state == State.UNINIT)) {
           return false;
         }
@@ -585,14 +597,18 @@ public void close(boolean abort) throws HiveException {

     // check if all parents are finished
     if (!allInitializedParentsAreClosed()) {
-      LOG.debug("Not all parent operators are closed. Not closing.");
+      if (isLogDebugEnabled) {
+        LOG.debug("Not all parent operators are closed. Not closing.");
+      }
       return;
     }

     // set state as CLOSE as long as all parents are closed
     // state == CLOSE doesn't mean all children are also in state CLOSE
     state = State.CLOSE;
-    LOG.info(id + " finished. closing... ");
+    if (isLogInfoEnabled) {
+      LOG.info(id + " finished. closing... ");
+    }

     // call the operator specific close routine
     closeOp(abort);
@@ -606,11 +622,15 @@ public void close(boolean abort) throws HiveException {
       }

       for (Operator op : childOperators) {
-        LOG.debug("Closing child = " + op);
+        if (isLogDebugEnabled) {
+          LOG.debug("Closing child = " + op);
+        }
         op.close(abort);
       }

-      LOG.info(id + " Close done");
+      if (isLogInfoEnabled) {
+        LOG.info(id + " Close done");
+      }
     } catch (HiveException e) {
       e.printStackTrace();
       throw e;
@@ -856,8 +876,10 @@ public void preorderMap(OperatorFunc opFunc) {
   }

   public void logStats() {
-    for (String e : statsMap.keySet()) {
-      LOG.info(e.toString() + ":" + statsMap.get(e).toString());
+    if (isLogInfoEnabled) {
+      for (String e : statsMap.keySet()) {
+        LOG.info(e.toString() + ":" + statsMap.get(e).toString());
+      }
     }
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index bad88d3..0e2552b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -81,7 +81,9 @@ private void processKeyValuePairs(Object key, Object value)
       if (prevPath == null) {
         prevPath = k.getInputPath();
         reader = OrcFile.createReader(fs, k.getInputPath());
-        LOG.info("ORC merge file input path: " + k.getInputPath());
+        if (isLogInfoEnabled) {
+          LOG.info("ORC merge file input path: " + k.getInputPath());
+        }
       }

       // store the orc configuration from the first file. All other files should
@@ -100,7 +102,9 @@ private void processKeyValuePairs(Object key, Object value)
             .version(version)
             .rowIndexStride(rowIndexStride)
             .inspector(reader.getObjectInspector()));
-        LOG.info("ORC merge file output path: " + outPath);
+        if (isLogDebugEnabled) {
+          LOG.info("ORC merge file output path: " + outPath);
+        }
       }

       if (!checkCompatibility(k)) {
@@ -123,9 +127,11 @@ private void processKeyValuePairs(Object key, Object value)
       outWriter.appendStripe(buffer, 0, buffer.length, v.getStripeInformation(),
           v.getStripeStatistics());

-      LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : "
-          + v.getStripeInformation().getOffset() + " length: "
-          + v.getStripeInformation().getLength() + " ]");
+      if (isLogInfoEnabled) {
+        LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : "
+            + v.getStripeInformation().getOffset() + " length: "
+            + v.getStripeInformation().getLength() + " ]");
+      }

       // add user metadata to footer in case of any
       if (v.isLastStripeInFile()) {
@@ -151,33 +157,43 @@ private void processKeyValuePairs(Object key, Object value)
   private boolean checkCompatibility(OrcFileKeyWrapper k) {
     // check compatibility with subsequent files
     if ((k.getTypes().get(0).getSubtypesCount() != columnCount)) {
-      LOG.info("Incompatible ORC file merge! Column counts does not match for "
-          + k.getInputPath());
+      if (isLogInfoEnabled) {
+        LOG.info("Incompatible ORC file merge! Column counts does not match for "
+            + k.getInputPath());
+      }
       return false;
     }

     if (!k.getCompression().equals(compression)) {
-      LOG.info("Incompatible ORC file merge! Compression codec does not match" +
-          " for " + k.getInputPath());
+      if (isLogInfoEnabled) {
+        LOG.info("Incompatible ORC file merge! Compression codec does not match" +
+            " for " + k.getInputPath());
+      }
       return false;
     }

     if (k.getCompressBufferSize() != compressBuffSize) {
-      LOG.info("Incompatible ORC file merge! Compression buffer size does not" +
-          " match for " + k.getInputPath());
+      if (isLogInfoEnabled) {
+        LOG.info("Incompatible ORC file merge! Compression buffer size does not" +
+            " match for " + k.getInputPath());
+      }
       return false;
     }

     if (!k.getVersion().equals(version)) {
-      LOG.info("Incompatible ORC file merge! Version does not match for "
-          + k.getInputPath());
+      if (isLogInfoEnabled) {
+        LOG.info("Incompatible ORC file merge! Version does not match for "
+            + k.getInputPath());
+      }
       return false;
     }

     if (k.getRowIndexStride() != rowIndexStride) {
-      LOG.info("Incompatible ORC file merge! Row index stride does not match" +
-          " for " + k.getInputPath());
+      if (isLogInfoEnabled) {
+        LOG.info("Incompatible ORC file merge! Row index stride does not match" +
+            " for " + k.getInputPath());
+      }
       return false;
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 2c9e81f..8d8f49c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -226,7 +226,7 @@ private byte tagForAlias(String alias) {
   public void cleanUpInputFileChangedOp() throws HiveException {
     inputFileChanged = true;
   }
-
+
   protected List smbJoinComputeKeys(Object row, byte alias) throws HiveException {
     return JoinUtil.computeKeys(row, joinKeys[alias],
         joinKeysObjectInspectors[alias]);
@@ -265,8 +265,8 @@ public void processOp(Object row, int tag) throws HiveException {
     byte alias = (byte) tag;

     // compute keys and values as StandardObjects
-    List key = smbJoinComputeKeys(row, alias);
-
+    List key = smbJoinComputeKeys(row, alias);
+
     List value = getFilteredValue(alias, row);

@@ -527,7 +527,9 @@ private void setUpFetchContexts(String alias, MergeQueue mergeQueue) throws Hive
     BucketMatcher bucketMatcher = ReflectionUtils.newInstance(bucketMatcherCls, null);
     getExecContext().setFileId(bucketMatcherCxt.createFileId(currentInputPath.toString()));
-    LOG.info("set task id: " + getExecContext().getFileId());
+    if (isLogInfoEnabled) {
+      LOG.info("set task id: " + getExecContext().getFileId());
+    }

     bucketMatcher.setAliasBucketFileNameMapping(bucketMatcherCxt
         .getAliasBucketFileNameMapping());
@@ -751,7 +753,9 @@ public final InspectableObject getNextRow() throws IOException {
         }
         Integer current = top();
         if (current == null) {
-          LOG.info("MergeQueue forwarded " + counter + " rows");
+          if (isLogInfoEnabled) {
+            LOG.info("MergeQueue forwarded " + counter + " rows");
+          }
           return null;
         }
         counter++;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
index 1aebe28..6f6f5fa 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
@@ -303,10 +303,11 @@ boolean allowPartialConsumption() {
   }

   void displayBrokenPipeInfo() {
-    LOG
-        .info("The script did not consume all input data. This is considered as an error.");
-    LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString()
-        + "=true; to ignore it.");
+    if (isLogInfoEnabled) {
+      LOG.info("The script did not consume all input data. This is considered as an error.");
+      LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString()
+          + "=true; to ignore it.");
+    }
     return;
   }

@@ -347,10 +348,12 @@ public void processOp(Object row, int tag) throws HiveException {
       }

       String[] wrappedCmdArgs = addWrapper(cmdArgs);
-      LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
-      LOG.info("tablename=" + tableName);
-      LOG.info("partname=" + partitionName);
-      LOG.info("alias=" + alias);
+      if (isLogInfoEnabled) {
+        LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
+        LOG.info("tablename=" + tableName);
+        LOG.info("partname=" + partitionName);
+        LOG.info("alias=" + alias);
+      }

       ProcessBuilder pb = new ProcessBuilder(wrappedCmdArgs);
       Map env = pb.environment();
@@ -442,8 +445,7 @@ public void processOp(Object row, int tag) throws HiveException {
               + StringUtils.stringifyException(e2));
         }
         setDone(true);
-        LOG
-            .warn("Got broken pipe during write: ignoring exception and setting operator to done");
+        LOG.warn("Got broken pipe during write: ignoring exception and setting operator to done");
       } else {
         LOG.error("Error in writing to script: " + e.getMessage());
         if (isBrokenPipeException(e)) {
@@ -666,7 +668,9 @@ public void processLine(Writable line) throws HiveException {
         long now = System.currentTimeMillis();
         // reporter is a member variable of the Operator class.
         if (now - lastReportTime > 60 * 1000 && reporter != null) {
-          LOG.info("ErrorStreamProcessor calling reporter.progress()");
+          if (isLogInfoEnabled) {
+            LOG.info("ErrorStreamProcessor calling reporter.progress()");
+          }
           lastReportTime = now;
           reporter.progress();
         }
@@ -721,7 +725,9 @@ public void run() {
           }
           proc.processLine(row);
         }
-        LOG.info("StreamThread " + name + " done");
+        if (isLogInfoEnabled) {
+          LOG.info("StreamThread " + name + " done");
+        }
       } catch (Throwable th) {
         scriptError = th;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
index 4dadc16..1642926 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
@@ -59,7 +59,9 @@ protected void initializeOp(Configuration hconf) throws HiveException {
      eval = ExprNodeEvaluatorFactory.toCachedEvals(eval);
     }
     output = new Object[eval.length];
-    LOG.info("SELECT " + inputObjInspectors[0].getTypeName());
+    if (isLogInfoEnabled) {
+      LOG.info("SELECT " + inputObjInspectors[0].getTypeName());
+    }
     outputObjInspector = initEvaluatorsAndReturnStruct(eval, conf.getOutputColumnNames(),
         inputObjInspectors[0]);
     initializeChildren(hconf);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index cb010fb..cc5fe5e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -153,7 +153,9 @@ private void gatherStats(Object row) {
         values.add(o == null ? defaultPartitionName : o.toString());
       }
       partitionSpecs = FileUtils.makePartName(conf.getPartColumns(), values);
-      LOG.info("Stats Gathering found a new partition spec = " + partitionSpecs);
+      if (isLogInfoEnabled) {
+        LOG.info("Stats Gathering found a new partition spec = " + partitionSpecs);
+      }
     }
     // find which column contains the raw data size (both partitioned and non partitioned
     int uSizeColumn = -1;
@@ -279,7 +281,9 @@ private void publishStats() throws HiveException {
     StatsPublisher statsPublisher = Utilities.getStatsPublisher(jc);
     if (!statsPublisher.connect(jc)) {
       // just return, stats gathering should not block the main query.
-      LOG.info("StatsPublishing error: cannot connect to database.");
+      if (isLogInfoEnabled) {
+        LOG.info("StatsPublishing error: cannot connect to database.");
+      }
       if (isStatsReliable) {
         throw new HiveException(ErrorMsg.STATSPUBLISHER_CONNECTION_ERROR.getErrorCodedMsg());
       }
@@ -307,7 +311,9 @@ private void publishStats() throws HiveException {
           throw new HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg());
         }
       }
-      LOG.info("publishing : " + key + " : " + statsToPublish.toString());
+      if (isLogInfoEnabled) {
+        LOG.info("publishing : " + key + " : " + statsToPublish.toString());
+      }
     }
     if (!statsPublisher.closeConnection()) {
       if (isStatsReliable) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
index db10f93..0fe176b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
@@ -111,7 +111,7 @@ protected void initializeOp(Configuration hconf) throws HiveException {
      // to
      // create ObjectInspectors.
      needsTransform[p] = (inputObjInspectors[p] != outputObjInspector);
-      if (needsTransform[p]) {
+      if (isLogInfoEnabled && needsTransform[p]) {
        LOG.info("Union Operator needs to transform row from parent[" + p
            + "] from " + inputObjInspectors[p] + " to " + outputObjInspector);
      }
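
For reference, every hunk above applies the same guard pattern: test a cached log-level flag before building the message, so the string concatenation is skipped entirely when that level is disabled. The sketch below is illustrative only and is not part of the patch; the class and method names are hypothetical, and it assumes a commons-logging Log with the flags cached once, the way the patch's isLogInfoEnabled / isLogDebugEnabled fields are used.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Illustrative sketch, not taken from the patch: guard per-row log statements
// with a cached level check so message construction only happens when needed.
public class GuardedLoggingSketch {
  private static final Log LOG = LogFactory.getLog(GuardedLoggingSketch.class);

  // Cached once, mirroring the isLogInfoEnabled / isLogDebugEnabled fields in the patch.
  private final boolean isLogInfoEnabled = LOG.isInfoEnabled();
  private final boolean isLogDebugEnabled = LOG.isDebugEnabled();

  public void processRow(Object row, long numRows) {
    if (isLogDebugEnabled) {
      // The concatenation below only runs when DEBUG is actually enabled.
      LOG.debug("Processing row: " + row);
    }
    if (isLogInfoEnabled && (numRows % 1000 == 0)) {
      LOG.info("records written - " + numRows);
    }
  }
}

The flag is cached in a field rather than calling LOG.isInfoEnabled() at each call site because these checks sit on per-row execution paths, where even the level lookup itself is worth avoiding.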