diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index 325a82d..cbbf736 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.cli; -import static org.apache.hadoop.util.StringUtils.stringifyException; - import java.io.BufferedReader; import java.io.File; import java.io.FileNotFoundException; @@ -154,8 +152,9 @@ public int processCmd(String cmd) { try { ret = processFile(cmd_1); } catch (IOException e) { - console.printError("Failed processing file "+ cmd_1 +" "+ e.getLocalizedMessage(), - stringifyException(e)); + console.printError( + "Failed processing file " + cmd_1 + " " + e.getLocalizedMessage(), + e); ret = 1; } } @@ -177,8 +176,9 @@ public int processCmd(String cmd) { console.printError("Command failed with exit code = " + ret); } } catch (Exception e) { - console.printError("Exception raised from Shell command " + e.getLocalizedMessage(), - stringifyException(e)); + console.printError( + "Exception raised from Shell command " + e.getLocalizedMessage(), + e); ret = 1; } } else { // local mode @@ -193,8 +193,8 @@ public int processCmd(String cmd) { } } } catch (SQLException e) { - console.printError("Failed processing command " + tokens[0] + " " + e.getLocalizedMessage(), - org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError("Failed processing command " + tokens[0] + " " + + e.getLocalizedMessage(), e); ret = 1; } catch (Exception e) { @@ -276,8 +276,8 @@ int processLocalCmd(String cmd, CommandProcessor proc, CliSessionState ss) { } } } catch (IOException e) { - console.printError("Failed with exception " + e.getClass().getName() + ":" + e.getMessage(), - "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError("Failed with exception " + e.getClass().getName() + + ":" + e.getMessage(), e); ret = 1; } diff --git a/common/src/java/org/apache/hive/common/util/HiveStringUtils.java b/common/src/java/org/apache/hive/common/util/HiveStringUtils.java index a4923f9..51985dd 100644 --- a/common/src/java/org/apache/hive/common/util/HiveStringUtils.java +++ b/common/src/java/org/apache/hive/common/util/HiveStringUtils.java @@ -140,9 +140,11 @@ public static String intern(String str) { /** * Make a string representation of the exception. + * @deprecated Use SLF4J logging facilities instead * @param e The exception to stringify * @return A string with exception name and call stack. 
*/ + @Deprecated public static String stringifyException(Throwable e) { StringWriter stm = new StringWriter(); PrintWriter wrt = new PrintWriter(stm); diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidKafkaUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidKafkaUtils.java index c5dc1e8..75220c3 100644 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidKafkaUtils.java +++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidKafkaUtils.java @@ -144,7 +144,7 @@ static KafkaSupervisorSpec createKafkaSupervisorSpec(Table table, static void updateKafkaIngestionSpec(String overlordAddress, KafkaSupervisorSpec spec) { try { String task = JSON_MAPPER.writeValueAsString(spec); - CONSOLE.printInfo("submitting kafka Spec {}", task); + CONSOLE.printInfo("submitting kafka Spec " + task); LOG.info("submitting kafka Supervisor Spec {}", task); FullResponseHolder response = diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 9fe9d05..c1a2033 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -1337,9 +1337,8 @@ private int executeTestCommand(final String command) { int rc = response.getResponseCode(); if (rc != 0) { - SessionState.getConsole() - .printError(response.toString(), - response.getException() != null ? Throwables.getStackTraceAsString(response.getException()) : ""); + SessionState.getConsole().printError(response.toString(), + response.getException()); } return rc; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java index 59bcd5c..da7f070 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.utils.StringUtils; import java.util.List; @@ -47,7 +46,6 @@ Deserializer s = HiveMetaStoreUtils.getDeserializer(conf, tbl, false); return HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s); } catch (Exception e) { - StringUtils.stringifyException(e); throw new MetaException(e.getMessage()); } finally { if (orgHiveLoader != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index 18089d5..9d3c8c3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -64,7 +64,6 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.wm.WmContext; import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -624,8 +623,7 @@ public void removeScratchDir() { fs.delete(p, true); fs.cancelDeleteOnExit(p); } catch (Exception e) { - LOG.warn("Error Removing Scratch: " - + StringUtils.stringifyException(e)); + LOG.warn("Error Removing Scratch", e); } } fsScratchDirs.clear(); @@ -645,8 +643,8 @@ public void removeMaterializedCTEs() { + materializedTable.getTableName() + ", status=" + status); } catch (IOException e) { // 
ignore - LOG.warn("Error removing " + location + " for materialized " + materializedTable.getTableName() + - ": " + StringUtils.stringifyException(e)); + LOG.warn("Error removing " + location + " for materialized " + + materializedTable.getTableName(), e); } } cteTables.clear(); @@ -769,7 +767,7 @@ public void clear() throws IOException { LOG.debug("Deleting result dir: {}", resDir); fs.delete(resDir, true); } catch (IOException e) { - LOG.info("Context clear error: " + StringUtils.stringifyException(e)); + LOG.info("Context clear error", e); } } @@ -779,7 +777,7 @@ public void clear() throws IOException { LOG.debug("Deleting result file: {}", resFile); fs.delete(resFile, false); } catch (IOException e) { - LOG.info("Context clear error: " + StringUtils.stringifyException(e)); + LOG.info("Context clear error", e); } } removeMaterializedCTEs(); @@ -820,10 +818,10 @@ public DataInput getStream() { return getNextStream(); } } catch (FileNotFoundException e) { - LOG.info("getStream error: " + StringUtils.stringifyException(e)); + LOG.info("getStream error", e); return null; } catch (IOException e) { - LOG.info("getStream error: " + StringUtils.stringifyException(e)); + LOG.info("getStream error", e); return null; } } @@ -835,10 +833,10 @@ private DataInput getNextStream() { return resFs.open(resDirPaths[resDirFilesNum++]); } } catch (FileNotFoundException e) { - LOG.info("getNextStream error: " + StringUtils.stringifyException(e)); + LOG.info("getNextStream error", e); return null; } catch (IOException e) { - LOG.info("getNextStream error: " + StringUtils.stringifyException(e)); + LOG.info("getNextStream error", e); return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 01ecf0a..b7d45d6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -364,8 +364,7 @@ private static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) { try { lst = HiveMetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer(conf)); } catch (Exception e) { - LOG.warn("Error getting schema: " - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.warn("Error getting schema", e); } if (lst != null) { schema = new Schema(lst, null); @@ -764,8 +763,7 @@ public void run() { SQLState = error.getSQLState(); downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); throw createProcessorResponse(error.getErrorCode()); } finally { // Trigger post compilation hook. 
Note that if the compilation fails here then @@ -1599,8 +1597,7 @@ private String getUserFromUGI() { errorMessage = "FAILED: Error in determining user while acquiring locks: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; - console.printError(errorMessage, - "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); } return null; } @@ -1695,8 +1692,7 @@ private void acquireLocks() throws CommandProcessorResponse { errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); throw createProcessorResponse(10); } finally { perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS); @@ -1831,8 +1827,7 @@ public CommandProcessorResponse run(String command, boolean alreadyCompiled) { } } catch(HiveException ex) { - console.printError("Unable to JSON-encode the error", - org.apache.hadoop.util.StringUtils.stringifyException(ex)); + console.printError("Unable to JSON-encode the error", ex); } return cpr; } @@ -1903,7 +1898,7 @@ private void compileInternal(String command, boolean deferClose) throws CommandP try { releaseLocksAndCommitOrRollback(false); } catch (LockException e) { - LOG.warn("Exception in releasing locks. " + org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.warn("Exception in releasing locks.", e); } throw cpr; } @@ -1951,8 +1946,7 @@ private void runInternal(String command, boolean alreadyCompiled) throws Command errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); SQLState = ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); throw createProcessorResponse(12); } @@ -2056,8 +2050,7 @@ else if(plan.getOperation() == HiveOperation.ROLLBACK) { errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e); SQLState = ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); throw createProcessorResponse(12); } isFinishedWithError = false; @@ -2103,8 +2096,7 @@ private CommandProcessorResponse handleHiveException(HiveException e, int ret, S SQLState = e.getCanonicalErrorMsg() != null ? e.getCanonicalErrorMsg().getSQLState() : ErrorMsg.findSQLState(e.getMessage()); downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); throw createProcessorResponse(ret); } private boolean requiresLock() { @@ -2509,8 +2501,7 @@ private void execute() throws CommandProcessorResponse { } SQLState = "08S01"; downstreamError = e; - console.printError(errorMessage + "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(errorMessage, e); throw createProcessorResponse(12); } finally { // Trigger query hooks after query completes its execution. 
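The call-site rewrite shown above repeats throughout the rest of the patch: instead of flattening the stack trace into the message with stringifyException, the Throwable is handed directly to the SLF4J logger or to the LogHelper printInfo/printError variants that now take a Throwable (see the SessionState changes further down), and the logging framework renders the stack trace itself. A minimal, hypothetical sketch of the before/after pattern — the class name and standalone main method are illustrative only, assuming SLF4J is on the classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StringifyExceptionMigration {
  private static final Logger LOG =
      LoggerFactory.getLogger(StringifyExceptionMigration.class);

  public static void main(String[] args) {
    try {
      throw new IllegalStateException("simulated failure");
    } catch (Exception e) {
      // Old pattern (removed by this patch): pre-render the stack trace into
      // the message string, e.g.
      //   LOG.error("Failed with exception " + StringUtils.stringifyException(e));

      // New pattern: pass the Throwable as the final argument and let the
      // logging framework attach the stack trace.
      LOG.error("Failed with exception " + e.getMessage(), e);
    }
  }
}

Keeping the stack trace out of the message string also keeps it out of user-facing error text, which is why the row-dump logging in ExecReducer, SparkReduceRecordHandler and ReduceRecordSource stays at trace level below.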
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java index a3105b6..3ddf008 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; -import org.apache.hive.common.util.HiveStringUtils; /** * Handles hook executions for {@link Driver}. @@ -89,8 +88,7 @@ public void initialize() { try { return HookUtils.readHooksFromConf(conf, hookConfVar); } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) { - String message = "Error loading hooks(" + hookConfVar + "): " + HiveStringUtils.stringifyException(e); - throw new RuntimeException(message, e); + throw new RuntimeException("Error loading hooks(" + hookConfVar + ")", e); } } @@ -219,7 +217,7 @@ public ASTNode runPreAnalyzeHooks(HiveSemanticAnalyzerHookContext hookCtx, ASTNo } catch (HiveException e) { throw e; } catch (Exception e) { - throw new HiveException("Error while invoking PreAnalyzeHooks:" + HiveStringUtils.stringifyException(e), e); + throw new HiveException("Error while invoking PreAnalyzeHooks", e); } } @@ -237,7 +235,7 @@ public void runPostAnalyzeHooks(HiveSemanticAnalyzerHookContext hookCtx, } catch (HiveException e) { throw e; } catch (Exception e) { - throw new HiveException("Error while invoking PostAnalyzeHooks:" + HiveStringUtils.stringifyException(e), e); + throw new HiveException("Error while invoking PostAnalyzeHooks", e); } } @@ -251,7 +249,7 @@ public void runPreDriverHooks(HiveDriverRunHookContext hookContext) throws HiveE } catch (HiveException e) { throw e; } catch (Exception e) { - throw new HiveException("Error while invoking PreDriverHooks:" + HiveStringUtils.stringifyException(e), e); + throw new HiveException("Error while invoking PreDriverHooks", e); } } @@ -264,7 +262,7 @@ public void runPostDriverHooks(HiveDriverRunHookContext hookContext) throws Hive } catch (HiveException e) { throw e; } catch (Exception e) { - throw new HiveException("Error while invoking PostDriverHooks:" + HiveStringUtils.stringifyException(e), e); + throw new HiveException("Error while invoking PostDriverHooks", e); } } @@ -299,7 +297,7 @@ private static void invokeGeneralHook(List hooks, String } catch (HiveException e) { throw e; } catch (Exception e) { - throw new HiveException("Error while invoking " + prefix + " hooks: " + HiveStringUtils.stringifyException(e), e); + throw new HiveException("Error while invoking " + prefix + " hooks", e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java index 1a8e5e7..577f520 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.io.FileOperations; import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.api.StageType; -import org.apache.hadoop.util.StringUtils; /** * CopyTask implementation. 
@@ -64,8 +63,7 @@ protected int copyOnePath(Path fromPath, Path toPath) { FileSystem dstFs = null; try { Utilities.FILE_OP_LOGGER.trace("Copying data from {} to {} ", fromPath, toPath); - console.printInfo("Copying data from " + fromPath.toString(), " to " - + toPath.toString()); + console.printInfo("Copying data from " + fromPath + " to " + toPath); FileSystem srcFs = fromPath.getFileSystem(conf); dstFs = toPath.getFileSystem(conf); @@ -112,8 +110,7 @@ protected int copyOnePath(Path fromPath, Path toPath) { return 0; } catch (Exception e) { - console.printError("Failed with exception " + e.getMessage(), "\n" - + StringUtils.stringifyException(e)); + console.printError("Failed with exception " + e.getMessage(), e); return (1); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java index 1f9e9aa..a1f141f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainSQRewriteTask.java @@ -79,7 +79,7 @@ public int execute(DriverContext driverContext) { } catch (Exception e) { setException(e); - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.error("Error", e); return (1); } finally { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java index 7c4efab..3466afb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java @@ -457,7 +457,7 @@ public int execute(DriverContext driverContext) { return (0); } catch (Exception e) { - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.error("Error", e); setException(e); return (1); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 183fae5..252e732 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -623,8 +623,7 @@ public void clearFetchContext() throws HiveException { this.iterPartDesc = null; this.iterSplits = Collections.emptyIterator(); } catch (Exception e) { - throw new HiveException("Failed with exception " + e.getMessage() - + StringUtils.stringifyException(e)); + throw new HiveException("Failed with exception " + e.getMessage(), e); } } @@ -681,8 +680,7 @@ private StructObjectInspector setupOutputObjectInspector() throws HiveException tableOI, tableOI, null, false); return getPartitionedRowOI(convertedOI); } catch (Exception e) { - throw new HiveException("Failed with exception " + e.getMessage() - + StringUtils.stringifyException(e)); + throw new HiveException("Failed with exception " + e.getMessage(), e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java index 0756420..aecadea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.util.ResourceDownloader; -import org.apache.hadoop.util.StringUtils; /** * FunctionTask. 
@@ -285,8 +284,8 @@ private int dropPermanentFunction(Hive db, DropFunctionDesc dropFunctionDesc) { + dropFunctionDesc.getFunctionName() + " doesn't exist."); return 0; } - LOG.info("drop function: ", e); - console.printError("FAILED: error during drop function: " + StringUtils.stringifyException(e)); + LOG.info("drop function", e); + console.printError("FAILED: error during drop function", e); return 1; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index ca4391f..9fa8466 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -63,7 +63,6 @@ import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,9 +94,8 @@ private void moveFile(Path sourcePath, Path targetPath, boolean isDfsDir) perfLogger.PerfLogBegin("MoveTask", PerfLogger.FILE_MOVES); String mesg = "Moving data to " + (isDfsDir ? "" : "local ") + "directory " - + targetPath.toString(); - String mesg_detail = " from " + sourcePath.toString(); - console.printInfo(mesg, mesg_detail); + + targetPath; + console.printInfo(mesg + " from " + sourcePath); FileSystem fs = sourcePath.getFileSystem(conf); if (isDfsDir) { @@ -449,20 +447,18 @@ public int execute(DriverContext driverContext) { if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) { errorCode = he.getCanonicalErrorMsg().getErrorCode(); if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) { - console.printError("Failed with exception " + he.getMessage(), "\n" - + StringUtils.stringifyException(he)); + console.printError("Failed with exception " + he.getMessage(), he); } else { console.printError("Failed with exception " + he.getMessage() + "\nRemote Exception: " + he.getRemoteErrorMsg()); - console.printInfo("\n", StringUtils.stringifyException(he),false); + console.printInfo("", he, false); } } setException(he); return errorCode; } catch (Exception e) { - console.printError("Failed with exception " + e.getMessage(), "\n" - + StringUtils.stringifyException(e)); + console.printError("Failed with exception " + e.getMessage(), e); setException(e); return (1); } @@ -481,10 +477,8 @@ public void logMessage(LoadTableDesc tbd) { mesg.append(')'); } String mesg_detail = " from " + tbd.getSourcePath(); - if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { - Utilities.FILE_OP_LOGGER.trace(mesg.toString() + " " + mesg_detail); - } - console.printInfo(mesg.toString(), mesg_detail); + Utilities.FILE_OP_LOGGER.trace("{} {}", mesg, mesg_detail); + console.printInfo(mesg.toString() + " " + mesg_detail); } private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java index 822051c..6337d3f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer; import org.apache.hadoop.hive.ql.plan.api.StageType; -import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.hive.common.FileUtils.HIDDEN_FILES_PATH_FILTER; import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_ENABLE_MOVE_OPTIMIZATION; @@ -80,8 +79,7 @@ protected int execute(DriverContext driverContext) { Path fromPath = work.getFromPaths()[0]; toPath = work.getToPaths()[0]; - console.printInfo("Copying data from " + fromPath.toString(), " to " - + toPath.toString()); + console.printInfo("Copying data from " + fromPath + " to " + toPath); ReplCopyWork rwork = ((ReplCopyWork)work); @@ -209,7 +207,7 @@ protected int execute(DriverContext driverContext) { } return 0; } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); setException(e); return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java index c91b78e..58be98f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplTxnTask.java @@ -158,8 +158,7 @@ public int execute(DriverContext driverContext) { return 1; } } catch (Exception e) { - console.printError("Failed with exception " + e.getMessage(), "\n" - + StringUtils.stringifyException(e)); + console.printError("Failed with exception " + e.getMessage(), e); setException(e); return 1; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java index 3210ca5..ab3d909 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java @@ -54,7 +54,6 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.StringUtils; import org.apache.spark.SparkConf; import org.apache.spark.SparkEnv; import org.apache.spark.SparkFiles; @@ -450,8 +449,7 @@ public void process(Object row, int tag) throws HiveException { outThread.join(0); } } catch (Exception e2) { - LOG.warn("Exception in closing outThread: " - + StringUtils.stringifyException(e2)); + LOG.warn("Exception in closing outThread", e2); } setDone(true); LOG.warn("Got broken pipe during write: ignoring exception and setting operator to done"); @@ -540,8 +538,7 @@ public void run() { outThread.join(0); } } catch (Exception e) { - LOG.warn("Exception in closing outThread: " - + StringUtils.stringifyException(e)); + LOG.warn("Exception in closing outThread", e); } try { @@ -549,8 +546,7 @@ public void run() { errThread.join(0); } } catch (Exception e) { - LOG.warn("Exception in closing errThread: " - + StringUtils.stringifyException(e)); + LOG.warn("Exception in closing errThread", e); } try { @@ -558,8 +554,7 @@ public void run() { scriptPid.destroy(); } } catch (Exception e) { - LOG.warn("Exception in destroying scriptPid: " - + StringUtils.stringifyException(e)); + LOG.warn("Exception in destroying scriptPid", e); } super.close(new_abort); @@ -745,16 +740,14 @@ public void run() { } catch (Throwable th) { scriptError = th; LOG.warn("Exception in StreamThread.run(): " + th.getMessage() + - "\nCause: " + th.getCause()); - LOG.warn(StringUtils.stringifyException(th)); + "\nCause: " + th.getCause(), th); } finally { try { if (in != null) { in.close(); } } catch (Exception e) { - LOG.warn(name + ": error in closing .."); - LOG.warn(StringUtils.stringifyException(e)); + LOG.warn(name + ": error in closing ..", e); } try { @@ -762,7 +755,7 @@ public void run() { proc.close(); } }catch (Exception e) { - 
LOG.warn(": error in closing .."+StringUtils.stringifyException(e)); + LOG.warn(": error in closing ..", e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java index 11ef62c..bf384f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java @@ -186,7 +186,7 @@ protected Hive getHive() { try { return Hive.getWithFastCheck(conf); } catch (HiveException e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new RuntimeException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index 01dd93c..93c79af 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -246,9 +246,7 @@ public int execute(DriverContext driverContext) { FileSystem fs = emptyScratchDir.getFileSystem(job); fs.mkdirs(emptyScratchDir); } catch (IOException e) { - e.printStackTrace(); - console.printError("Error launching map-reduce job", "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError("Error launching map-reduce job", e); return 5; } @@ -363,8 +361,7 @@ public int execute(DriverContext driverContext) { job.setNumReduceTasks(1); } catch (Exception e) { LOG.error("Sampling error", e); - console.printError(e.toString(), - "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(e.toString(), e); rWork.setNumReduceTasks(1); job.setNumReduceTasks(1); } @@ -434,9 +431,7 @@ public int execute(DriverContext driverContext) { mesg = "Job Submission failed" + mesg; } - // Has to use full name to make sure it does not conflict with - // org.apache.commons.lang.StringUtils - console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(mesg, e); success = false; returnVal = 1; @@ -482,7 +477,7 @@ public int execute(DriverContext driverContext) { success = false; returnVal = 3; String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'"; - console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(mesg, e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java index 91868a4..02c3b6f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java @@ -47,7 +47,6 @@ import org.apache.hadoop.mapred.Mapper; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.StringUtils; /** * ExecMapper is the generic Map class for Hive. 
Together with ExecReducer it is @@ -165,7 +164,6 @@ public void map(Object key, Object value, OutputCollector output, // Don't create a new object if we are already out of memory throw (OutOfMemoryError) e; } else { - l4j.error(StringUtils.stringifyException(e)); throw new RuntimeException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java index e106bc9..0eac400 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java @@ -48,7 +48,6 @@ import org.apache.hadoop.mapred.Reducer; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.StringUtils; /** * ExecReducer is the generic Reducer class for Hive. Together with ExecMapper it is @@ -233,19 +232,19 @@ public void reduce(Object key, Iterator values, OutputCollector output, try { reducer.process(row, tag); } catch (Exception e) { - String rowString = null; try { - rowString = SerDeUtils.getJSONString(row, rowObjectInspector[tag]); + // Log the contents of the row that caused exception so that it is + // available for debugging. But when exposed through an error + // message it can leak sensitive information, even to the client + // application. + final String rowString = + SerDeUtils.getJSONString(row, rowObjectInspector[tag]); + LOG.trace("Hive Runtime Error while processing row (tag=" + tag + + ") " + rowString); } catch (Exception e2) { - rowString = "[Error getting row data with exception " + - StringUtils.stringifyException(e2) + " ]"; + LOG.trace("Error getting row data", e2); } - // Log the contents of the row that caused exception so that it's available for debugging. But - // when exposed through an error message it can leak sensitive information, even to the - // client application. 
- LOG.trace("Hive Runtime Error while processing row (tag=" - + tag + ") " + rowString); throw new HiveException("Hive Runtime Error while processing row", e); } } @@ -256,7 +255,7 @@ public void reduce(Object key, Iterator values, OutputCollector output, // Don't create a new object if we are already out of memory throw (OutOfMemoryError) e; } else { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new RuntimeException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java index 0f594a1..5eb6b15 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java @@ -523,7 +523,7 @@ public void updateWebUiStats(MapRedStats mapRedStats, RunningJob rj) { try { queryDisplay.updateTaskStatistics(mapRedStats, rj, getId()); } catch (IOException | JSONException e) { - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e), e); + LOG.error("Error", e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index 4bc7568..524813e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -406,7 +406,7 @@ public int executeInProcess(DriverContext driverContext) { retVal = 2; } l4j.error(message, throwable); - console.printError(message, HiveStringUtils.stringifyException(throwable)); + console.printError(message, throwable); return retVal; } return 0; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java index 07cb5cb..2a70cb9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java @@ -405,18 +405,18 @@ public void processRow(Object key, final Object value) throws IOException { try { reducer.process(row, tag); } catch (Exception e) { - String rowString = null; try { - rowString = SerDeUtils.getJSONString(row, rowObjectInspector[tag]); + // Log contents of the row which caused exception so that it is + // available for debugging. But when exposed through an error message + // it can leak sensitive information, even to the client application. + final String rowString = + SerDeUtils.getJSONString(row, rowObjectInspector[tag]); + LOG.trace("Hive exception while processing row (tag=" + tag + ") " + + rowString); } catch (Exception e2) { - rowString = "[Error getting row data with exception " - + StringUtils.stringifyException(e2) + " ]"; + LOG.trace("Error getting row data", e2); } - // Log contents of the row which caused exception so that it's available for debugging. But - // when exposed through an error message it can leak sensitive information, even to the - // client application. 
- LOG.trace("Hive exception while processing row (tag=" + tag + ") " + rowString); throw new HiveException("Error while processing row ", e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java index 9277510..0052cbd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java @@ -77,7 +77,6 @@ import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; -import org.apache.hadoop.util.StringUtils; import com.google.common.collect.Lists; @@ -377,7 +376,7 @@ private int close(int rc) { rc = 3; String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'"; - console.printError(mesg, "\n" + StringUtils.stringifyException(e)); + console.printError(mesg, e); setException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java index aeef3c1..5a07082 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/LocalSparkJobMonitor.java @@ -125,7 +125,7 @@ public int startMonitor() { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils LOG.error(msg, e); - console.printError(msg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(msg, e); rc = 1; done = true; sparkJobStatus.setMonitorError(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java index 87b69cb..7d9faa7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/RemoteSparkJobMonitor.java @@ -173,9 +173,7 @@ public int startMonitor() { (state != null ? 
state.name() : "UNKNOWN"); msg = "Failed to monitor Job[" + sparkJobStatus.getJobId() + "]" + msg; - // Has to use full name to make sure it does not conflict with - // org.apache.commons.lang.StringUtils - console.printError(msg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(msg, e); } rc = 1; done = true; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java index 0ddf3f1..f96934a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.StringUtils; import org.apache.tez.mapreduce.lib.MRReader; import org.apache.tez.runtime.library.api.KeyValueReader; @@ -96,7 +95,7 @@ private boolean processRow(Object value) { // Don't create a new object if we are already out of memory throw (OutOfMemoryError) e; } else { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); closeReader(); throw new RuntimeException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java index c55a394..bed518c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.StringUtils; import org.apache.tez.mapreduce.input.MRInputLegacy; import org.apache.tez.mapreduce.processor.MRTaskReporter; import org.apache.tez.runtime.api.Input; @@ -217,7 +216,7 @@ private boolean processRow(Object key, Object value) { // Don't create a new object if we are already out of memory throw (OutOfMemoryError) e; } else { - l4j.error(StringUtils.stringifyException(e)); + l4j.error("Error", e); throw new RuntimeException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java index 72446af..9f80db1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java @@ -55,7 +55,6 @@ import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.StringUtils; import org.apache.tez.runtime.api.Reader; import org.apache.tez.runtime.library.api.KeyValueReader; import org.apache.tez.runtime.library.api.KeyValuesReader; @@ -300,7 +299,7 @@ public boolean pushRecord() throws HiveException { // Don't create a new object if we are already out of memory throw (OutOfMemoryError) e; } else { - l4j.error(StringUtils.stringifyException(e)); + l4j.error("Error", e); throw new RuntimeException(e); } } @@ -362,19 +361,17 @@ public void next() throws HiveException { try { reducer.process(row, tag); } catch (Exception e) { - String rowString = null; try { - rowString = SerDeUtils.getJSONString(row, rowObjectInspector); + // Log the contents of the row that caused exception so that it is + // available for debugging. 
But when exposed through an error message + // it can leak sensitive information, even to the client application. + final String rowString = SerDeUtils.getJSONString(row, rowObjectInspector); + l4j.trace("Hive Runtime Error while processing row (tag=" + + tag + ") " + rowString); } catch (Exception e2) { - rowString = "[Error getting row data with exception " - + StringUtils.stringifyException(e2) + " ]"; + l4j.trace("Error getting row data", e2); } - // Log the contents of the row that caused exception so that it's available for debugging. But - // when exposed through an error message it can leak sensitive information, even to the - // client application. - l4j.trace("Hive Runtime Error while processing row (tag=" - + tag + ") " + rowString); throw new HiveException("Hive Runtime Error while processing row", e); } } @@ -397,7 +394,7 @@ private boolean pushRecordVector() { // Don't create a new object if we are already out of memory throw (OutOfMemoryError) e; } else { - l4j.error(StringUtils.stringifyException(e)); + l4j.error("Error", e); throw new RuntimeException(e); } } @@ -491,15 +488,14 @@ private void processVectorGroup(BytesWritable keyWritable, } batch.reset(); } catch (Exception e) { - String rowString = null; try { - rowString = batch.toString(); + final String rowString = batch.toString(); + l4j.error("Hive Runtime Error while processing vector batch (tag=" + tag + + ") (vectorizedVertexNum " + vectorizedVertexNum + ") " + rowString, e); } catch (Exception e2) { - rowString = "[Error getting row data with exception " - + StringUtils.stringifyException(e2) + " ]"; + l4j.error("Error getting row data", e2); } - l4j.error("Hive Runtime Error while processing vector batch (tag=" + tag - + ") (vectorizedVertexNum " + vectorizedVertexNum + ") " + rowString, e); + throw new HiveException("Hive Runtime Error while processing vector batch (tag=" + tag + ") (vectorizedVertexNum " + vectorizedVertexNum + ")", e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java index fa6160f..a1c9cd1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.util.StringUtils; import org.apache.tez.common.TezUtils; import org.apache.tez.mapreduce.processor.MRTaskReporter; import org.apache.tez.runtime.api.AbstractLogicalIOProcessor; @@ -273,7 +272,7 @@ protected void initializeAndRunProcessor(Map inputs, } finally { if (originalThrowable != null && (originalThrowable instanceof Error || Throwables.getRootCause(originalThrowable) instanceof Error)) { - LOG.error("Cannot recover from this FATAL error", StringUtils.stringifyException(originalThrowable)); + LOG.error("Cannot recover from this FATAL error", originalThrowable); getContext().reportFailure(TaskFailureType.FATAL, originalThrowable, "Cannot recover from this error"); throw new RuntimeException(originalThrowable); @@ -289,7 +288,7 @@ protected void initializeAndRunProcessor(Map inputs, } } if (originalThrowable != null) { - LOG.error(StringUtils.stringifyException(originalThrowable)); + LOG.error("Error", originalThrowable); if (originalThrowable instanceof InterruptedException) { throw (InterruptedException) originalThrowable; } else { diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index f2ed07a..75e0646 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -589,7 +589,7 @@ int close(TezWork work, int rc, DAGClient dagClient) { rc = 3; String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'"; - console.printError(mesg, "\n" + StringUtils.stringifyException(e)); + console.printError(mesg, e); } } if (dagClient != null) { // null in tests diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java index 0bfa78d..84e6ece 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java @@ -107,8 +107,7 @@ public HiveHistoryImpl(SessionState ss) { log(RecordTypes.SessionStart, hm); } catch (IOException e) { console.printError("FAILED: Failed to open Query Log : " + histFileName - + " " + e.getMessage(), "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + + " " + e.getMessage(), e); } } @@ -229,7 +228,7 @@ public void setTaskCounters(String queryId, String taskId, Counters ctrs) { } } catch (Exception e) { - LOG.warn(org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.warn("Exception", e); } if (sb1.length() > 0) { taskInfoMap.get(id).hm.put(Keys.ROWS_INSERTED.name(), sb1.toString()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java index a9d845a..3d05275 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java @@ -217,7 +217,7 @@ public void run(HookContext hookContext) { String lineage = out.toString(); if (testMode) { // Logger to console - log(lineage); + log(lineage, null); } else { // In non-test mode, emit to a log file, // which can be different from the normal hive.log. @@ -227,8 +227,7 @@ public void run(HookContext hookContext) { } } catch (Throwable t) { // Don't fail the query just because of any lineage issue. - log("Failed to log lineage graph, query is not affected\n" - + org.apache.hadoop.util.StringUtils.stringifyException(t)); + log("Failed to log lineage graph, query is not affected", t); } } } @@ -236,10 +235,10 @@ public void run(HookContext hookContext) { /** * Logger an error to console if available. */ - private static void log(String error) { + private static void log(String error, Throwable t) { LogHelper console = SessionState.getConsole(); if (console != null) { - console.printError(error); + console.printError(error, t); } } @@ -300,7 +299,7 @@ private static void log(String error) { } if (dependencies == null || dependencies.size() != fields) { log("Result schema has " + fields - + " fields, but we don't get as many dependencies"); + + " fields, but we don't get as many dependencies", null); } else { // Go through each target column, generate the lineage edges. 
Set targets = new LinkedHashSet(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java index acc52af..98cf353 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java @@ -166,10 +166,7 @@ public int execute(DriverContext driverContext) { mesg = "Job Submission failed" + mesg; } - // Has to use full name to make sure it does not conflict with - // org.apache.commons.lang.StringUtils - console.printError(mesg, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(mesg, e); success = false; returnVal = 1; @@ -201,8 +198,7 @@ public int execute(DriverContext driverContext) { returnVal = 3; String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'"; - console.printError(mesg, "\n" + - org.apache.hadoop.util.StringUtils.stringifyException(e)); + console.printError(mesg, e); } } finally { HadoopJobExecHelper.runningJobs.remove(rj); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java index 8f21f7c..7964bed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java @@ -94,7 +94,7 @@ public int execute(DriverContext driverContext) { ctxCreated = true; } }catch (IOException e) { - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.error("Error", e); setException(e); return 5; } @@ -194,7 +194,7 @@ public int execute(DriverContext driverContext) { String mesg = rj != null ? 
("Ended Job = " + rj.getJobID()) : "Job Submission failed"; // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - LOG.error(mesg, org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.error(mesg, e); setException(e); success = false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index c017790..c6797b7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1929,7 +1929,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par try { setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, tableSnapshot); } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -2141,7 +2141,7 @@ private Partition loadPartitionInternal(Path loadPath, Table tbl, Map partitions, } catch (IOException io) { LOG.error("Could not delete partition directory contents after failed partition creation: ", io); } - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -2696,7 +2696,7 @@ private void constructOneLBLocationMap(FileStatus fSta, + " in table " + tbl.getTableName() + " with loadPath=" + loadPath); } catch (TException e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } catch (Exception e) { @@ -2732,7 +2732,7 @@ private void constructOneLBLocationMap(FileStatus fSta, return result; } catch (TException te) { - LOG.error(StringUtils.stringifyException(te)); + LOG.error("Error", te); throw new HiveException("Exception updating metastore for acid table " + tableName + " with partitions " + result.values(), te); } @@ -2853,7 +2853,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); } } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } @@ -2895,7 +2895,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws part.setWriteId(tableSnapshot != null ? 
tableSnapshot.getWriteId() : 0); return new Partition(tbl, getMSC().add_partition(part)); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -2967,7 +2967,7 @@ public Partition createPartition(Table tbl, Map partSpec) throws } } } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } return out; @@ -3058,7 +3058,7 @@ public Partition getPartition(Table tbl, Map partSpec, // getPartition() throws NoSuchObjectException to indicate null partition tpart = null; } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } try { @@ -3101,7 +3101,7 @@ public Partition getPartition(Table tbl, Map partSpec, return null; } } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } return new Partition(tbl, tpart); @@ -3435,7 +3435,7 @@ public boolean dropPartition(String dbName, String tableName, List partV // listPartitionNames() throws NoSuchObjectException to indicate null partitions return Lists.newArrayList(); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } return names; @@ -3456,7 +3456,7 @@ public boolean dropPartition(String dbName, String tableName, List partV // listPartitionNames() throws NoSuchObjectException to indicate null partitions return Lists.newArrayList(); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } return names; @@ -3476,7 +3476,7 @@ public boolean dropPartition(String dbName, String tableName, List partV tParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), (short) -1, getUserName(), getGroupNames()); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } List parts = new ArrayList(tParts.size()); @@ -3506,7 +3506,7 @@ public boolean dropPartition(String dbName, String tableName, List partV try { tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short)-1); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } Set parts = new LinkedHashSet(tParts.size()); @@ -3729,7 +3729,7 @@ public void validatePartitionNameCharacters(List partVals) throws HiveEx try { getMSC().validatePartitionNameCharacters(partVals); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -4450,7 +4450,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem srcFs = srcf.getFileSystem(conf); srcs = srcFs.globStatus(srcf); } catch (IOException e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException("addFiles: filesystem error in check phase. 
" + e.getMessage(), e); } if (srcs == null) { @@ -4844,7 +4844,7 @@ public static boolean isHadoop1() { return convertFromMetastore(getTable(destDb, destinationTableName), partitions); } catch (Exception ex) { - LOG.error(StringUtils.stringifyException(ex)); + LOG.error("Error", ex); throw new HiveException(ex); } } @@ -4887,7 +4887,7 @@ private HiveStorageHandler createStorageHandler(org.apache.hadoop.hive.metastore HiveUtils.getStorageHandler(conf, tbl.getParameters().get(META_TABLE_STORAGE)); return storageHandler; } catch (HiveException ex) { - LOG.error(StringUtils.stringifyException(ex)); + LOG.error("Error", ex); throw new MetaException( "Failed to load storage handler: " + ex.getMessage()); } @@ -5003,7 +5003,7 @@ public boolean setPartitionColumnStatistics( request.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0); return getMSC().setPartitionColumnStatistics(request); } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -5024,7 +5024,7 @@ public boolean setPartitionColumnStatistics( } return retv; } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -5044,7 +5044,7 @@ public boolean setPartitionColumnStatistics( return getMSC().getPartitionColumnStatistics( dbName, tableName, partNames, colNames, writeIdList); } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -5060,7 +5060,7 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, } return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, writeIdList); } catch (Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); return new AggrStats(new ArrayList(),0); } } @@ -5070,7 +5070,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri try { return getMSC().deleteTableColumnStatistics(dbName, tableName, colName); } catch(Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -5080,7 +5080,7 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, try { return getMSC().deletePartitionColumnStatistics(dbName, tableName, partName, colName); } catch(Exception e) { - LOG.debug(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new HiveException(e); } } @@ -5095,7 +5095,7 @@ public String getDelegationToken(String owner, String renewer) try { return getMSC().getDelegationToken(owner, renewer); } catch(Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -5105,7 +5105,7 @@ public void cancelDelegationToken(String tokenStrForm) try { getMSC().cancelDelegationToken(tokenStrForm); } catch(Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -5144,7 +5144,7 @@ public CompactionResponse compact2(String dbname, String tableName, String partN } return getMSC().compact2(dbname, tableName, partName, cr, tblproperties); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -5152,7 +5152,7 @@ public ShowCompactResponse showCompactions() throws HiveException { try { return getMSC().showCompactions(); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + 
LOG.error("Error", e); throw new HiveException(e); } } @@ -5161,7 +5161,7 @@ public GetOpenTxnsInfoResponse showTransactions() throws HiveException { try { return getMSC().showTxns(); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } @@ -5170,7 +5170,7 @@ public void abortTransactions(List txnids) throws HiveException { try { getMSC().abortTxns(txnids); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new HiveException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 2131bf1..f16eeef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -523,7 +523,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set> partSpe try { parts = db.getPartitions(table, partSpec); } catch (HiveException e) { - LOG.error("Got HiveException during obtaining list of partitions" - + StringUtils.stringifyException(e)); + LOG.error("Got HiveException during obtaining list of partitions", e); throw new SemanticException(e.getMessage(), e); } } else { @@ -4073,7 +4071,7 @@ private void addTablePartsOutputs(Table table, List> partSpe parts.add(p); } } catch (HiveException e) { - LOG.debug("Wrong specification" + StringUtils.stringifyException(e)); + LOG.debug("Wrong specification", e); throw new SemanticException(e.getMessage(), e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 54f34f6..a1d416d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -1997,9 +1997,7 @@ public void getMaterializationMetadata(QB qb) throws SemanticException { } } } catch (HiveException e) { - // Has to use full name to make sure it does not conflict with - // org.apache.commons.lang.StringUtils - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.error("Error", e); if (e instanceof SemanticException) { throw (SemanticException)e; } @@ -2587,7 +2585,7 @@ public Object dispatch(Node nd, java.util.Stack stack, // an old SQL construct which has been eliminated in a later Hive // version, so we need to provide full debugging info to help // with fixing the view definition. 
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); + LOG.error("Error", e); StringBuilder sb = new StringBuilder(); sb.append(e.getMessage()); ErrorMsg.renderOrigin(sb, viewOrigin); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java index 0b334e1..45024db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/DfsProcessor.java @@ -96,8 +96,7 @@ public CommandProcessorResponse run(String command) { } catch (Exception e) { console.printError("Exception raised from DFSShell.run " - + e.getLocalizedMessage(), org.apache.hadoop.util.StringUtils - .stringifyException(e)); + + e.getLocalizedMessage(), e); return new CommandProcessorResponse(1); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index de5cd8b..180acd5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -1219,10 +1219,10 @@ public void printInfo(String info, boolean isSilent) { * BeeLine uses the operation log file to show the logs to the user, so depending on the * BeeLine settings it could be shown to the user. * @param info The log message - * @param detail Extra detail to log which will be not printed if null + * @param t Any corresponding Throwable object to include in the message */ - public void printInfo(String info, String detail) { - printInfo(info, detail, getIsSilent()); + public void printInfo(String info, Throwable t) { + printInfo(info, t, getIsSilent()); } /** @@ -1231,14 +1231,18 @@ public void printInfo(String info, String detail) { * BeeLine uses the operation log file to show the logs to the user, so depending on the * BeeLine settings it could be shown to the user. * @param info The log message - * @param detail Extra detail to log which will be not printed if null + * @param t Any corresponding Throwable object to include in the message * @param isSilent If true then the message will not be printed to the info stream */ - public void printInfo(String info, String detail, boolean isSilent) { + public void printInfo(String info, Throwable t, boolean isSilent) { if (!isSilent) { getInfoStream().println(info); } - LOG.info(info + StringUtils.defaultString(detail)); + if (t == null) { + LOG.info(info); + } else { + LOG.info(info, t); + } } /** @@ -1257,11 +1261,15 @@ public void printError(String error) { * BeeLine uses the operation log file to show the logs to the user, so depending on the * BeeLine settings it could be shown to the user. 
* @param error The log message - * @param detail Extra detail to log which will be not printed if null + * @param t Any corresponding Throwable object to include in the message */ - public void printError(String error, String detail) { + public void printError(String error, Throwable t) { getErrStream().println(error); - LOG.error(error + StringUtils.defaultString(detail)); + if (t == null) { + LOG.error(error); + } else { + LOG.error(error, t); + } } } @@ -1394,8 +1402,7 @@ static boolean unregisterJar(List jarsToUnregister) { return true; } catch (IOException e) { console.printError("Unable to unregister " + jarsToUnregister - + "\nException: " + e.getMessage(), "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); + + "\nException: " + e.getMessage(), e); return false; } } @@ -1521,7 +1528,7 @@ public String add_resource(ResourceType t, String value, boolean convertToUnix) t.preHook(resourceSet, localized); } catch (RuntimeException e) { - getConsole().printError(e.getMessage(), "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e)); + getConsole().printError(e.getMessage(), e); throw e; } catch (URISyntaxException e) { getConsole().printError(e.getMessage()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index 53b3065..149f404 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -54,7 +54,6 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.StringUtils; import org.apache.hive.common.util.ReflectionUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -232,7 +231,8 @@ public void run() { console.printInfo(msg); } catch (Exception e) { - console.printInfo("[Warning] could not update stats for " + partish.getSimpleName() + ".", "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e)); + console.printError("Could not update stats for " + + partish.getSimpleName(), e); } } @@ -300,7 +300,9 @@ private int aggregateStats(ExecutorService threadPool, Hive db) { ret = updatePartitions(db, scs, table); } catch (Exception e) { - console.printError("Failed to collect footer statistics.", "Failed with exception " + e.getMessage() + "\n" + StringUtils.stringifyException(e)); + console.printError( + "Failed to collect footer statistics. 
Failed with exception " + + e.getMessage(), e); // Fail the query if the stats are supposed to be reliable if (work.isStatsReliable()) { ret = -1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index 6eb1ca2..a66afbf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -55,7 +55,6 @@ import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -341,9 +340,7 @@ public Void call() throws Exception { } } catch (Exception e) { - console.printInfo("[Warning] could not update stats.", - "Failed with exception " + e.getMessage() + "\n" - + StringUtils.stringifyException(e)); + console.printError("Could not update stats.", e); // Fail the query if the stats are supposed to be reliable if (work.isStatsReliable()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index 2e25ece..252d60d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +75,7 @@ public void initialize(CompilationOpContext opContext) { JobConf job = new JobConf(conf); ftOp = new FetchOperator(fWork, job); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error("Error", e); throw new RuntimeException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 06b0209..0f8ff9c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.StringUtils; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -96,8 +95,8 @@ public void run() { clean(compactionInfo, minOpenTxnId); } } catch (Throwable t) { - LOG.error("Caught an exception in the main loop of compactor cleaner, " + - StringUtils.stringifyException(t)); + LOG.error("Caught an exception in the main loop of compactor cleaner", + t); } finally { if (handle != null) { @@ -214,8 +213,9 @@ public Object run() throws Exception { } txnHandler.markCleaned(ci); } catch (Exception e) { - LOG.error("Caught exception when cleaning, unable to complete cleaning of " + ci + " " + - StringUtils.stringifyException(e)); + LOG.error( + "Caught exception when cleaning, unable to complete cleaning of " + + ci, e); txnHandler.markFailed(ci); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index dc05e19..d7faa92 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -1113,13 +1113,13 @@ public String toString() { throw new IOException(s); } } catch (ClassNotFoundException e) { - LOG.error("Unable to instantiate class, " + StringUtils.stringifyException(e)); + LOG.error("Unable to instantiate class", e); throw new IOException(e); } catch (InstantiationException e) { - LOG.error("Unable to instantiate class, " + StringUtils.stringifyException(e)); + LOG.error("Unable to instantiate class", e); throw new IOException(e); } catch (IllegalAccessException e) { - LOG.error("Unable to instantiate class, " + StringUtils.stringifyException(e)); + LOG.error("Unable to instantiate class", e); throw new IOException(e); } return t; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index a0df82c..a696558 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -166,8 +166,7 @@ public void run() { if (compactionNeeded != null) requestCompaction(ci, runAs, compactionNeeded); } catch (Throwable t) { LOG.error("Caught exception while trying to determine if we should compact " + - ci + ". Marking failed to avoid repeated failures, " + - "" + StringUtils.stringifyException(t)); + ci + ". Marking failed to avoid repeated failures", t); txnHandler.markFailed(ci); } } @@ -181,8 +180,9 @@ public void run() { // Clean TXN_TO_WRITE_ID table for entries under min_uncommitted_txn referred by any open txns. txnHandler.cleanTxnToWriteIdTable(); } catch (Throwable t) { - LOG.error("Initiator loop caught unexpected exception this time through the loop: " + - StringUtils.stringifyException(t)); + LOG.error( + "Initiator loop caught unexpected exception this time through the loop", + t); } finally { if(handle != null) { @@ -196,8 +196,9 @@ public void run() { } while (!stop.get()); } catch (Throwable t) { - LOG.error("Caught an exception in the main loop of compactor initiator, exiting " + - StringUtils.stringifyException(t)); + LOG.error( + "Caught an exception in the main loop of compactor initiator, exiting", + t); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 42ccfdc..c64d634 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.StringUtils; import java.io.IOException; import java.net.InetAddress; @@ -219,13 +218,13 @@ public Object run() throws Exception { } } catch (Exception e) { LOG.error("Caught exception while trying to compact " + ci + - ". Marking failed to avoid repeated failures, " + StringUtils.stringifyException(e)); + ". 
Marking failed to avoid repeated failures", e); msc.markFailed(CompactionInfo.compactionInfoToStruct(ci)); msc.abortTxns(Collections.singletonList(compactorTxnId)); } } catch (TException | IOException t) { - LOG.error("Caught an exception in the main loop of compactor worker " + workerName + ", " + - StringUtils.stringifyException(t)); + LOG.error("Caught an exception in the main loop of compactor worker " + + workerName, t); if (msc != null) { msc.close(); } @@ -236,8 +235,8 @@ public Object run() throws Exception { LOG.error("Interrupted while sleeping to instantiate metastore client"); } } catch (Throwable t) { - LOG.error("Caught an exception in the main loop of compactor worker " + workerName + ", " + - StringUtils.stringifyException(t)); + LOG.error("Caught an exception in the main loop of compactor worker " + + workerName, t); } finally { if(heartbeater != null) { heartbeater.cancel(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java index 807eca9..ddbd7f3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.util.StringUtils; /** * GenericUDAFAverage. @@ -552,7 +551,7 @@ public void iterate(AggregationBuffer aggregation, Object[] parameters) } catch (NumberFormatException e) { if (!warned) { warned = true; - LOG.warn("Ignoring similar exceptions: " + StringUtils.stringifyException(e)); + LOG.warn("Ignoring similar exceptions", e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java index 79bf2be..95ecd57 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java @@ -286,10 +286,8 @@ public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveExcep } catch (NumberFormatException e) { if (!warned) { warned = true; - LOG.warn(getClass().getSimpleName() + " " - + StringUtils.stringifyException(e)); - LOG - .warn(getClass().getSimpleName() + LOG.warn(getClass().getSimpleName(), e); + LOG.warn(getClass().getSimpleName() + " ignoring similar exceptions."); } } @@ -431,8 +429,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveExcep } catch (NumberFormatException e) { if (!warned) { warned = true; - LOG.warn(getClass().getSimpleName() + " " - + StringUtils.stringifyException(e)); + LOG.warn(getClass().getSimpleName(), e); LOG .warn(getClass().getSimpleName() + " ignoring similar exceptions."); @@ -563,8 +560,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveExcep } catch (NumberFormatException e) { if (!warned) { warned = true; - LOG.warn(getClass().getSimpleName() + " " - + StringUtils.stringifyException(e)); + LOG.warn(getClass().getSimpleName(), e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java index bb55d88..63fd339 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.util.StringUtils; /** * Compute the variance. This class is extended by: GenericUDAFVarianceSample @@ -327,8 +326,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) } catch (NumberFormatException e) { if (!warned) { warned = true; - LOG.warn(getClass().getSimpleName() + " " - + StringUtils.stringifyException(e)); + LOG.warn(getClass().getSimpleName(), e); LOG.warn(getClass().getSimpleName() + " ignoring similar exceptions."); } diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java index 1becbb8..67ac41f 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/TestMetastoreExpr.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; import com.google.common.collect.Lists; @@ -70,7 +69,6 @@ protected void tearDown() throws Exception { client.close(); } catch (Throwable e) { System.err.println("Unable to close metastore"); - System.err.println(StringUtils.stringifyException(e)); throw new Exception(e); } } @@ -82,7 +80,6 @@ protected void setUp() throws Exception { client = new HiveMetaStoreClient(new HiveConf(this.getClass())); } catch (Throwable e) { System.err.println("Unable to open the metastore"); - System.err.println(StringUtils.stringifyException(e)); throw new Exception(e); } } diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java index be37b2a..9867d7a 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hive.metastore.api.TxnState; import org.apache.hadoop.hive.metastore.api.UnlockRequest; import org.apache.hadoop.hive.metastore.api.TxnToWriteId; -import org.apache.hadoop.util.StringUtils; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.core.LoggerContext; @@ -1545,10 +1544,12 @@ public void run() { t2.join(6000); if(ueh1.error != null) { - Assert.assertTrue("Unexpected error from t1: " + StringUtils.stringifyException(ueh1.error), false); + LOG.error("Unexpected error from t1", ueh1.error); + Assert.fail(ueh1.error.toString()); } if (ueh2.error != null) { - Assert.assertTrue("Unexpected error from t2: " + StringUtils.stringifyException(ueh2.error), false); + LOG.error("Unexpected error from t2", ueh2.error); + Assert.fail(ueh2.error.toString()); } Assert.assertEquals("5 means both threads have completed", 5, stepTracker.get()); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java index 3e45016..2f21907 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java +++ 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.util.StringUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -112,8 +111,9 @@ private Table createPartitionedTable(String catName, String dbName, String table db.createTable(table); return db.getTable(catName, dbName, tableName); } catch (Exception exception) { - fail("Unable to drop and create table " + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + " because " - + StringUtils.stringifyException(exception)); + fail("Unable to drop and create table " + + StatsUtils.getFullyQualifiedTableName(dbName, tableName) + + " because " + exception.getMessage()); throw exception; } } @@ -122,7 +122,8 @@ private void cleanUpTableQuietly(String catName, String dbName, String tableName try { db.dropTable(catName, dbName, tableName); } catch (Exception exception) { - fail("Unexpected exception: " + StringUtils.stringifyException(exception)); + exception.printStackTrace(); + fail("Unexpected exception: " + exception.getMessage()); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java index 1ec4636..b9e1fdc 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.util.StringUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -109,9 +108,9 @@ private Table createPartitionedTable(String catName, String dbName, String table db.createTable(table); return db.getTable(catName, dbName, tableName); } catch (Exception exception) { + exception.printStackTrace(); fail("Unable to drop and create table " + StatsUtils - .getFullyQualifiedTableName(dbName, tableName) + " because " + StringUtils - .stringifyException(exception)); + .getFullyQualifiedTableName(dbName, tableName) + " because " + exception.getMessage()); throw exception; } } @@ -120,7 +119,8 @@ private void cleanUpTableQuietly(String catName, String dbName, String tableName try { db.dropTable(catName, dbName, tableName, true, true, true); } catch (Exception exception) { - fail("Unexpected exception: " + StringUtils.stringifyException(exception)); + exception.printStackTrace(); + fail("Unexpected exception: " + exception.getMessage()); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/session/TestSparkSessionManagerImpl.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/session/TestSparkSessionManagerImpl.java index 0a2738d..e662609 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/session/TestSparkSessionManagerImpl.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/spark/session/TestSparkSessionManagerImpl.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.util.StringUtils; import org.apache.hive.spark.client.SparkClientFactory; import org.junit.Assert; @@ -319,7 +318,7 @@ public void run() { 
anyFailedSessionThread = true; String msg = String.format("Error executing '%s'", Thread.currentThread().getName()); LOG.error(msg, e); - fail(msg + " " + StringUtils.stringifyException(e)); + fail(msg); } } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 8d55fec..7b2da91 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -57,7 +57,6 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hadoop.mapred.SequenceFileOutputFormat; import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.util.StringUtils; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -98,7 +97,6 @@ private static Hive setUpImpl(HiveConf hiveConf) throws Exception { try { return Hive.get(hiveConf); } catch (Exception e) { - System.err.println(StringUtils.stringifyException(e)); System.err.println("Unable to initialize Hive Metastore using configuration: \n" + hiveConf); throw e; } @@ -113,7 +111,6 @@ protected void tearDown() throws Exception { hiveConf.setFloat("fs.trash.interval", 30); // FS_TRASH_INTERVAL_KEY (hadoop-2) Hive.closeCurrent(); } catch (Exception e) { - System.err.println(StringUtils.stringifyException(e)); System.err .println("Unable to close Hive Metastore using configruation: \n " + hiveConf); @@ -201,7 +198,6 @@ public void testTable() throws Throwable { assertTrue("Unable to drop table: " + tableName, false); } } catch (Throwable e) { - System.err.println(StringUtils.stringifyException(e)); System.err.println("testTable failed"); throw e; } @@ -226,8 +222,9 @@ public void testThriftTable() throws Throwable { try { hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e1) { - System.err.println(StringUtils.stringifyException(e1)); - assertTrue("Unable to drop table", false); + System.err.println(e1); + e1.printStackTrace(); + fail("Unable to drop table"); } Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName); tbl.setInputFormatClass(SequenceFileInputFormat.class.getName()); @@ -244,14 +241,14 @@ public void testThriftTable() throws Throwable { try { hm.createTable(tbl); } catch (HiveException e) { - System.err.println(StringUtils.stringifyException(e)); - assertTrue("Unable to create table: " + tableName, false); + System.err.println(e); + e.printStackTrace(); + fail("Unable to create table: " + tableName); } // get table validateTable(tbl, tableName); hm.dropTable(DEFAULT_DATABASE_NAME, tableName); } catch (Throwable e) { - System.err.println(StringUtils.stringifyException(e)); System.err.println("testThriftTable() failed"); throw e; } @@ -352,8 +349,9 @@ private void validateTable(Table tbl, String tableName) throws MetaException { assertEquals("Serde is not set correctly", tbl.getDeserializer() .getClass().getName(), ft.getDeserializer().getClass().getName()); } catch (HiveException e) { - System.err.println(StringUtils.stringifyException(e)); - assertTrue("Unable to fetch table correctly: " + tableName, false); + System.err.println(e); + e.printStackTrace(); + fail("Unable to fetch table correctly: " + tableName); } } @@ -426,7 +424,6 @@ public void testGetAndDropTables() throws Throwable { } hm.dropDatabase(dbName); } catch (Throwable e) { - System.err.println(StringUtils.stringifyException(e)); System.err.println("testGetAndDropTables() failed"); throw e; } 
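The test changes above all follow the same shape: instead of stringifying the stack trace into an assertTrue(message, false) call, the exception is printed for diagnostics and the test fails with a concise message. Below is a minimal, self-contained sketch of that pattern; the class name, table name, and failing operation are illustrative and not taken from the patch.

import static org.junit.Assert.fail;

import org.junit.Test;

public class ExceptionReportingPatternTest {

  // Hypothetical stand-in for a metastore call that may throw.
  private void dropTable(String tableName) throws Exception {
    throw new Exception("simulated failure dropping " + tableName);
  }

  @Test
  public void testDropTable() {
    String tableName = "example_table";
    try {
      dropTable(tableName);
    } catch (Exception e) {
      // Pattern applied in this patch: print the stack trace for diagnostics,
      // then fail with a short message instead of embedding
      // StringUtils.stringifyException(e) in the assertion text.
      e.printStackTrace();
      fail("Unable to drop table: " + tableName + " because " + e.getMessage());
    }
  }
}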
@@ -445,7 +442,8 @@ public void run() { try { hm2r.set(setUpImpl(hiveConf2)); } catch (Exception e) { - System.err.println(StringUtils.stringifyException(e)); + System.err.println(e); + e.printStackTrace(); } } }); @@ -545,7 +543,6 @@ public void testDropTableTrash() throws Throwable { } hm.dropDatabase(dbName); } catch (Throwable e) { - System.err.println(StringUtils.stringifyException(e)); System.err.println("testDropTableTrash() failed"); throw e; } @@ -557,7 +554,7 @@ public void testDropTableTrash() throws Throwable { return fs.globStatus(trashDir.suffix("/*")); } - private Table createPartitionedTable(String dbName, String tableName) throws Exception { + private Table createPartitionedTable(String dbName, String tableName) { try { hm.dropTable(dbName, tableName); @@ -569,10 +566,11 @@ private Table createPartitionedTable(String dbName, String tableName) throws Exc return hm.getTable(dbName, tableName); } catch (Exception exception) { + exception.printStackTrace(); fail("Unable to drop and create table " + StatsUtils.getFullyQualifiedTableName(dbName, tableName) - + " because " + StringUtils.stringifyException(exception)); - throw exception; + + " because " + exception.getMessage()); } + return null; } private void cleanUpTableQuietly(String dbName, String tableName) { @@ -580,7 +578,9 @@ private void cleanUpTableQuietly(String dbName, String tableName) { hm.dropTable(dbName, tableName, true, true, true); } catch(Exception exception) { - fail("Unexpected exception: " + StringUtils.stringifyException(exception)); + System.err.println(exception); + exception.printStackTrace(); + fail("Unexpected exception: " + exception.getMessage()); } } @@ -641,7 +641,8 @@ public void testDropPartitionsWithPurge() throws Exception { } catch (Exception e) { - fail("Unexpected exception: " + StringUtils.stringifyException(e)); + e.printStackTrace(); + fail("Unexpected exception: " + e.getMessage()); } finally { cleanUpTableQuietly(dbName, tableName); @@ -697,7 +698,8 @@ public void testAutoPurgeTablesAndPartitions() throws Throwable { } catch(Exception e) { - fail("Unexpected failure: " + StringUtils.stringifyException(e)); + e.printStackTrace(); + fail("Unexpected failure: " + e.getMessage()); } finally { cleanUpTableQuietly(dbName, tableName); @@ -710,8 +712,9 @@ public void testPartition() throws Throwable { try { hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { - System.err.println(StringUtils.stringifyException(e)); - assertTrue("Unable to drop table: " + tableName, false); + System.err.println(e); + e.printStackTrace(); + fail("Unable to drop table: " + tableName); } LinkedList cols = new LinkedList(); cols.add("key"); @@ -724,15 +727,17 @@ public void testPartition() throws Throwable { hm.createTable(tableName, cols, part_cols, TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); } catch (HiveException e) { - System.err.println(StringUtils.stringifyException(e)); - assertTrue("Unable to create table: " + tableName, false); + System.err.println(e); + e.printStackTrace(); + fail("Unable to create table: " + tableName); } Table tbl = null; try { tbl = hm.getTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { - System.err.println(StringUtils.stringifyException(e)); - assertTrue("Unable to fetch table: " + tableName, false); + System.err.println(e); + e.printStackTrace(); + fail("Unable to fetch table: " + tableName); } HashMap part_spec = new HashMap(); part_spec.clear(); @@ -741,12 +746,14 @@ public void testPartition() throws 
Throwable { try { hm.createPartition(tbl, part_spec); } catch (HiveException e) { - System.err.println(StringUtils.stringifyException(e)); - assertTrue("Unable to create parition for table: " + tableName, false); + System.err.println(e); + e.printStackTrace(); + fail("Unable to create parition for table: " + tableName); } hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName); } catch (Throwable e) { - System.err.println(StringUtils.stringifyException(e)); + System.err.println(e); + e.printStackTrace(); System.err.println("testPartition() failed"); throw e; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java index eccca48..6b26596 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; -import org.apache.hadoop.util.StringUtils; /** * @@ -48,7 +47,6 @@ protected void setUp() throws Exception { try { hm = Hive.get(hiveConf); } catch (Exception e) { - System.err.println(StringUtils.stringifyException(e)); System.err .println("Unable to initialize Hive Metastore using configuration: \n " + hiveConf); diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDe.java index 948cddc..f47a546 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDe.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDe.java @@ -42,7 +42,6 @@ import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; -import org.apache.hadoop.util.StringUtils; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.protocol.TProtocolFactory; import org.apache.thrift.transport.TIOStreamTransport; @@ -143,7 +142,7 @@ public void initialize(Configuration job, Properties tbl) throws SerDeException bt.initialize(); } catch (Exception e) { - System.err.println(StringUtils.stringifyException(e)); + LOG.debug("Error", e); throw new SerDeException(e); } } @@ -163,7 +162,7 @@ public Object deserialize(Writable field) throws SerDeException { deserializeReuse = bt.deserialize(deserializeReuse, iprot_); return deserializeReuse; } catch (Exception e) { - e.printStackTrace(); + LOG.debug("Error", e); throw new SerDeException(e); } } @@ -220,7 +219,7 @@ public Writable serialize(Object obj, ObjectInspector objInspector) throws SerDe bt.serialize(obj, objInspector, oprot_); oprot_.getTransport().flush(); } catch (Exception e) { - e.printStackTrace(); + LOG.debug("Error", e); throw new SerDeException(e); } ret.set(bos_.getData(), 0, bos_.getLength()); diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java index 044fd16..7b4902c 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java @@ -1274,7 +1274,7 @@ public static String getTypeNameFromJavaClass(Type t) { ObjectInspectorOptions.JAVA); return oi.getTypeName(); } catch (Throwable e) { - LOG.info(StringUtils.stringifyException(e)); + LOG.info("Error", e); return "unknown"; } } diff --git 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java index b5fc994..c748b80 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java @@ -444,7 +444,7 @@ public void run() { } } } catch (IOException e) { - LOG.error("Exception when clearing cmroot:" + StringUtils.stringifyException(e)); + LOG.error("Exception when clearing cmroot", e); } } } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/StringUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/StringUtils.java index e49a423..b0613dd 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/StringUtils.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/StringUtils.java @@ -96,9 +96,11 @@ public static String normalizeIdentifier(String identifier) { /** * Make a string representation of the exception. + * @deprecated Use SLF4J facilities to log a Throwable * @param e The exception to stringify * @return A string with exception name and call stack. */ + @Deprecated public static String stringifyException(Throwable e) { StringWriter stm = new StringWriter(); PrintWriter wrt = new PrintWriter(stm); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index a9398ae..f10ad1a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -169,7 +169,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.util.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.core.LoggerContext; import org.apache.thrift.TException; @@ -9591,8 +9590,8 @@ public void run() { startRemoteOnlyTasks(conf); startStatsUpdater(conf); } catch (Throwable e) { - LOG.error("Failure when starting the compactor, compactions may not happen, " + - StringUtils.stringifyException(e)); + LOG.error( + "Failure starting the compactor, compactions may not happen", e); } finally { startLock.unlock(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java index 7b32544..fbe3b5c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java @@ -38,7 +38,6 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import 
org.apache.hadoop.security.token.delegation.MetastoreDelegationTokenSupport; import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -300,8 +299,7 @@ public void run() { rollMasterKeyExt(); lastMasterKeyUpdate = now; } catch (IOException e) { - LOGGER.error("Master key updating failed. " - + StringUtils.stringifyException(e)); + LOGGER.error("Master key updating failed", e); } } if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 8253ccb..6537986 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -1009,7 +1009,7 @@ public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException { LOG.debug("Going to rollback"); rollbackDBConn(dbConn); checkRetryable(dbConn, e, "checkFailedCompactions(" + ci + ")"); - LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(e)); + LOG.error("Unable to connect to transaction database", e); return false;//weren't able to check } finally { close(rs, pStmt, dbConn); @@ -1084,7 +1084,7 @@ public void markFailed(CompactionInfo ci) throws MetaException {//todo: this sho checkRetryable(dbConn, e, "markFailed(" + ci + ")"); } catch(MetaException ex) { - LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex)); + LOG.error("Unable to connect to transaction database", ex); } LOG.error("markFailed(" + ci + ") failed: " + e.getMessage(), e); } finally { @@ -1118,7 +1118,7 @@ public void setHadoopJobId(String hadoopJobId, long id) { checkRetryable(dbConn, e, "setHadoopJobId(" + hadoopJobId + "," + id + ")"); } catch(MetaException ex) { - LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex)); + LOG.error("Unable to connect to transaction database", ex); } LOG.error("setHadoopJobId(" + hadoopJobId + "," + id + ") failed: " + e.getMessage(), e); } finally { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java index ea4e394..2174586 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java @@ -1397,7 +1397,7 @@ public static Partition getPartition(IMetaStoreClient msc, Table tbl, Map
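Taken together, these changes replace string concatenation of StringUtils.stringifyException(e) with passing the Throwable directly to the SLF4J logger (or to the new LogHelper printInfo/printError overloads), letting the logging framework render the stack trace. A minimal sketch of the before/after logging style follows; the class name and the simulated failure are hypothetical.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CompactorLoggingExample {

  private static final Logger LOG = LoggerFactory.getLogger(CompactorLoggingExample.class);

  public void runOnce() {
    try {
      compactOnce();
    } catch (Throwable t) {
      // Old style (stringifyException is now @Deprecated in both
      // HiveStringUtils and the metastore StringUtils):
      //   LOG.error("Caught an exception in the main loop of compactor worker, "
      //       + StringUtils.stringifyException(t));
      // New style used throughout this patch: pass the Throwable as the last
      // argument so the logger prints the full stack trace itself.
      LOG.error("Caught an exception in the main loop of compactor worker", t);
    }
  }

  // Hypothetical stand-in for the real compaction work.
  private void compactOnce() {
    throw new IllegalStateException("simulated compaction failure");
  }
}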