diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 0d7b92d649..43286659e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -60,6 +60,8 @@
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TJSONProtocol;
 import org.apache.thrift.transport.TMemoryBuffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -71,6 +73,8 @@ public class QueryPlan implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  private static final Logger LOG = LoggerFactory.getLogger(QueryPlan.class);
+
   private String cboInfo;
   private String queryString;
   private String optimizedCBOPlan;
@@ -643,7 +647,7 @@ public String toString() {
     try {
       return getJSONQuery(getQueryPlan());
     } catch (Exception e) {
-      e.printStackTrace();
+      LOG.warn("Unable to produce query plan JSON string", e);
       return e.toString();
     }
   }
@@ -655,8 +659,7 @@ public String toThriftJSONString() throws IOException {
     try {
       q.write(oprot);
     } catch (TException e) {
-      // TODO Auto-generated catch block
-      e.printStackTrace();
+      LOG.warn("Unable to produce query plan Thrift string", e);
       return q.toString();
     }
     return tmb.toString("UTF-8");
@@ -669,8 +672,7 @@ public String toBinaryString() throws IOException {
     try {
       q.write(oprot);
     } catch (TException e) {
-      // TODO Auto-generated catch block
-      e.printStackTrace();
+      LOG.warn("Unable to produce query plan binary string", e);
       return q.toString();
     }
     byte[] buf = new byte[tmb.length()];
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLSemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLSemanticAnalyzerFactory.java
index 06f60ab756..9d4bf79af9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLSemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLSemanticAnalyzerFactory.java
@@ -113,7 +113,6 @@ public static BaseSemanticAnalyzer getAnalyzer(ASTNode root, QueryState querySta
       BaseSemanticAnalyzer analyzer = analyzerClass.getConstructor(QueryState.class).newInstance(queryState);
       return analyzer;
     } catch (Exception e) {
-      e.printStackTrace();
       throw new RuntimeException(e);
     }
   }
@@ -126,7 +125,6 @@ public static BaseSemanticAnalyzer getAnalyzer(ASTNode root, QueryState querySta
           analyzerClass.getConstructor(QueryState.class, Hive.class).newInstance(queryState, db);
       return analyzer;
     } catch (Exception e) {
-      e.printStackTrace();
       throw new RuntimeException(e);
     }
   }
@@ -148,7 +146,6 @@ public static BaseSemanticAnalyzer getAnalyzer(ASTNode root, QueryState querySta
         return TYPE_TO_ANALYZER.get(actualType);
       }
     } catch (Exception e) {
-      e.printStackTrace();
       throw new RuntimeException(e);
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 1bb52b0348..04166a23ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -678,7 +678,6 @@ protected void initializeOp(Configuration hconf) throws HiveException {
     } catch (HiveException e) {
       throw e;
     } catch (Exception e) {
-      e.printStackTrace();
       throw new HiveException(e);
     }
   }
@@ -797,7 +796,6 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException {
         autoDelete = fs.deleteOnExit(fsp.outPaths[0]);
       }
     } catch (Exception e) {
-      e.printStackTrace();
       throw new HiveException(e);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
index e8f7dd067e..0b5fa9799b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableDummyOperator.java
@@ -50,7 +50,6 @@ protected void initializeOp(Configuration hconf) throws HiveException {
       this.outputObjInspector = serde.getObjectInspector();
     } catch (Exception e) {
       LOG.error("Generating output obj inspector from dummy object error", e);
-      e.printStackTrace();
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
index 1aae142ba7..3e1100ced4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinUtil.java
@@ -46,9 +46,13 @@
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hive.common.util.ReflectionUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class JoinUtil {
 
+  private static final Logger LOG = LoggerFactory.getLogger(JoinUtil.class);
+
   /**
    * Represents the join result between two tables
    */
@@ -304,7 +308,7 @@ public static AbstractSerDe getSpillSerDe(byte alias, TableDesc[] spillTableDesc
     try {
       SerDeUtils.initializeSerDe(sd, null, desc.getProperties(), null);
     } catch (SerDeException e) {
-      e.printStackTrace();
+      LOG.warn("Error getting spill table", e);
       return null;
     }
     return sd;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
index 3210ca5cf8..343c6b313d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
@@ -499,8 +499,7 @@ public void close(boolean abort) throws HiveException {
         new_abort = true;
       }
     } catch (IOException e) {
-      LOG.error("Got ioexception: " + e.getMessage());
-      e.printStackTrace();
+      LOG.error("Got exception", e);
       new_abort = true;
     } catch (InterruptedException e) {
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 60aac5a31e..f51c0fc871 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -234,6 +234,8 @@
    */
   public static final Logger FILE_OP_LOGGER = LoggerFactory.getLogger("FileOperations");
 
+  public static final Logger LOGGER = LoggerFactory.getLogger(Utilities.class);
+
   /**
    * The object in the reducer are composed of these top level fields.
    */
@@ -846,7 +848,7 @@ public static boolean contentsEqual(InputStream is1, InputStream is2, boolean ig
         }
       }
     } catch (FileNotFoundException e) {
-      e.printStackTrace();
+      LOG.warn("Could not compare files. One or both cannot be found", e);
     }
     return false;
   }
@@ -1935,7 +1937,7 @@ public static String getBucketFileNameFromPathSubString(String bucketName) {
     try {
       return bucketName.split(COPY_KEYWORD)[0];
     } catch (Exception e) {
-      e.printStackTrace();
+      LOG.warn("Invalid bucket file name", e);
       return bucketName;
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 46764d4e30..8a8822d560 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -248,7 +248,6 @@ public int execute() {
       FileSystem fs = emptyScratchDir.getFileSystem(job);
       fs.mkdirs(emptyScratchDir);
     } catch (IOException e) {
-      e.printStackTrace();
       console.printError("Error launching map-reduce job", "\n"
           + org.apache.hadoop.util.StringUtils.stringifyException(e));
       return 5;
@@ -428,7 +427,6 @@ public int execute() {
       returnVal = jobExecHelper.progress(rj, jc, ctx);
       success = (returnVal == 0);
     } catch (Exception e) {
-      e.printStackTrace();
       setException(e);
       String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
       if (rj != null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
index dfabfb81e5..24b801e9c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
@@ -194,7 +194,6 @@ public void onRootVertexInitialized(String inputName, InputDescriptor inputDescr
           MRInputUserPayloadProto.newBuilder(protoPayload).setGroupingEnabled(true).build();
       inputDescriptor.setUserPayload(UserPayload.create(updatedPayload.toByteString().asReadOnlyByteBuffer()));
     } catch (IOException e) {
-      e.printStackTrace();
       throw new RuntimeException(e);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 19f688672f..9b2dae36a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -949,14 +949,9 @@ private Vertex createVertex(JobConf conf, ReduceWork reduceWork, FileSystem fs,
    * Helper method to create a yarn local resource.
    */
   private LocalResource createLocalResource(FileSystem remoteFs, Path file,
-      LocalResourceType type, LocalResourceVisibility visibility) {
+      LocalResourceType type, LocalResourceVisibility visibility) throws IOException {
-    FileStatus fstat = null;
-    try {
-      fstat = remoteFs.getFileStatus(file);
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
+    final FileStatus fstat = remoteFs.getFileStatus(file);
 
     URL resourceURL = ConverterUtils.getYarnUrlFromPath(file);
     long resourceSize = fstat.getLen();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java
index 616f2d6c10..4e74be0746 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryViewer.java
@@ -71,7 +71,6 @@ void init() {
       HiveHistoryUtil.parseHiveHistory(historyFile, this);
     } catch (IOException e) {
       // TODO pass on this exception
-      e.printStackTrace();
       LOG.error("Error parsing hive history log file", e);
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index dbbe6f1ec5..91acb009a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1474,7 +1474,6 @@ else if (prev != null && next.maxWriteId == prev.maxWriteId
       }
       return dirToSnapshots;
     } catch (IOException e) {
-      e.printStackTrace();
       throw new IOException(e);
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
index 3e45e45b27..6eba2e5b71 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
@@ -1834,7 +1834,7 @@ public synchronized boolean next(LongWritable readRows) throws IOException {
       try {
         ret = nextKeyBuffer();
       } catch (EOFException eof) {
-        eof.printStackTrace();
+        LOG.warn("Failed to get next key buffer", eof);
       }
     }
     return (ret > 0) && next(readRows);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index b9bcda9bfc..4b339a6c58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -623,10 +623,8 @@ private void initWriter() throws IOException {
       try {
         AcidUtils.OrcAcidVersion.writeVersionFile(path.getParent(), fs);
       } catch (Exception e) {
-        e.printStackTrace();
-        // Ignore; might have been created by another concurrent writer, writing to a different bucket
-        // within this delta/base directory
-        LOG.trace(e.fillInStackTrace().toString());
+        LOG.trace("Ignore; might have been created by another concurrent writer, writing to a" +
+            " different bucket within this delta/base directory", e);
       }
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java
index 29112f8a89..ca840d7f4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java
@@ -117,7 +117,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx, Object..
       try {
         hasGroupBy(mapJoinOp, context);
       } catch (Exception e) {
-        e.printStackTrace();
+        LOG.warn("Failed to determine if has group-by", e);
       }
       MapJoinDesc mapJoinDesc = mapJoinOp.getConf();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java
index 575e9c27df..54373effe2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java
@@ -153,8 +153,7 @@ private MapredWork convertSMBWorkToJoinWork(MapredWork currWork, SMBMapJoinOpera
       genSMBJoinWork(currJoinWork.getMapWork(), newSMBJoinOp);
       return currJoinWork;
     } catch (Exception e) {
-      e.printStackTrace();
-      throw new SemanticException("Generate Map Join Task Error: " + e.getMessage());
+      throw new SemanticException("Generate Map Join Task Error", e);
     }
   }
@@ -314,8 +313,7 @@ private boolean isEligibleForOptimization(SMBMapJoinOperator originalSMBJoinOp)
         taskToAliases.put(newTask, aliases);
       }
     } catch (Exception e) {
-      e.printStackTrace();
-      throw new SemanticException("Generate Map Join Task Error: ", e);
+      throw new SemanticException("Generate Map Join Task Error", e);
     }
     // insert current common join task to conditional task
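
Note: every change above follows the same SLF4J idiom these classes already use — either pass the Throwable as the last argument so the full stack trace goes to the configured log instead of stderr, or drop the printStackTrace() call entirely where the exception is wrapped and rethrown. A minimal, self-contained sketch of the pattern follows; the class, method names, and message are illustrative only, not taken from the patch.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingPatternExample {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingPatternExample.class);

  public String describe() {
    try {
      return doWork();
    } catch (Exception e) {
      // Passing the exception as the final argument makes SLF4J log its stack trace.
      LOG.warn("Unable to produce description", e);
      return e.toString();
    }
  }

  // Hypothetical helper, present only so the example compiles.
  private String doWork() throws Exception {
    return "ok";
  }
}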