diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index d121a21f62..082f1cbc09 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -1422,7 +1422,8 @@ public void closeOp(boolean abort) throws HiveException {
       // Hadoop always call close() even if an Exception was thrown in map() or
       // reduce().
       for (FSPaths fsp : valToPaths.values()) {
-        fsp.abortWritersAndUpdaters(fs, abort, !autoDelete && isNativeTable() && !conf.isMmTable());
+        fsp.abortWritersAndUpdaters(fs, abort,
+            !autoDelete && isNativeTable() && !conf.isMmTable() && !conf.isDirectInsert());
       }
     }
     fsp = prevFsp = null;
diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
index 37f5b60a0a..398af8a67e 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
@@ -116,7 +116,7 @@ public GetCrossReferenceOperation(HiveSession parentSession,
     this.foreignSchemaName = foreignSchema;
     this.foreignTableName = foreignTable;
     this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion(), false);
-    LOG.info("Starting GetCrossReferenceOperation with the following parameters:"
+    log.info("Starting GetCrossReferenceOperation with the following parameters:"
         + " parentCatalogName={}, parentSchemaName={}, parentTableName={}, foreignCatalog={}, "
         + "foreignSchema={}, foreignTable={}", parentCatalogName, parentSchemaName,
         parentTableName, foreignCatalog, foreignSchema, foreignTable);
@@ -125,7 +125,7 @@ public GetCrossReferenceOperation(HiveSession parentSession,
   @Override
   public void runInternal() throws HiveSQLException {
     setState(OperationState.RUNNING);
-    LOG.info("Fetching cross reference metadata");
+    log.info("Fetching cross reference metadata");
     try {
       IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
       ForeignKeysRequest fkReq = new ForeignKeysRequest(parentSchemaName, parentTableName, foreignSchemaName, foreignTableName);
@@ -141,16 +141,16 @@ public void runInternal() throws HiveSQLException {
             fk.getKey_seq(), fk.getUpdate_rule(), fk.getDelete_rule(),
             fk.getFk_name(), fk.getPk_name(), 0};
         rowSet.addRow(rowData);
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
           String debugMessage = getDebugMessage("cross reference", RESULT_SET_SCHEMA);
-          LOG.debug(debugMessage, rowData);
+          log.debug(debugMessage, rowData);
         }
       }
-      if (LOG.isDebugEnabled() && rowSet.numRows() == 0) {
-        LOG.debug("No cross reference metadata has been returned.");
+      if (log.isDebugEnabled() && rowSet.numRows() == 0) {
+        log.debug("No cross reference metadata has been returned.");
       }
       setState(OperationState.FINISHED);
-      LOG.info("Fetching cross reference metadata has been successfully finished");
+      log.info("Fetching cross reference metadata has been successfully finished");
     } catch (Exception e) {
       setState(OperationState.ERROR);
       throw new HiveSQLException(e);
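
Note on the log call style above: the renamed calls rely on two standard SLF4J
behaviors. "{}" placeholders are substituted lazily, only when the level is
enabled, and a Throwable passed as the last argument is printed as a stack
trace rather than consuming a placeholder. A minimal, self-contained sketch,
not part of the patch; the class name and messages are illustrative:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderSketch {
      private static final Logger log = LoggerFactory.getLogger(PlaceholderSketch.class);

      public static void main(String[] args) {
        String handle = "OperationHandle [opType=GET_CROSS_REFERENCE]";
        // Formatted lazily: no string concatenation happens if INFO is disabled,
        // so the isDebugEnabled() guards above are only needed to skip building
        // expensive arguments such as getDebugMessage(...), not the call itself.
        log.info("Starting operation: handle={}", handle);
        // The trailing Throwable is logged with its stack trace; the single
        // "{}" is filled from the remaining argument.
        log.warn("Operation {} failed", handle, new IllegalStateException("boom"));
      }
    }
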
diff --git a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
index c83273bcf2..3a0506bd28 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
@@ -67,8 +67,8 @@ protected HiveCommandOperation(HiveSession parentSession, String statement,

   private void setupSessionIO(SessionState sessionState) {
     try {
-      LOG.info("Putting temp output to file " + sessionState.getTmpOutputFile().toString()
-          + " and error output to file " + sessionState.getTmpErrOutputFile().toString());
+      log.info("Putting temp output to file " + sessionState.getTmpOutputFile()
+          + " and error output to file " + sessionState.getTmpErrOutputFile());
       sessionState.in = null; // hive server's session input stream is not used
       // open a per-session file in auto-flush mode for writing temp results and tmp error output
       sessionState.out = new SessionStream(
@@ -78,10 +78,10 @@ private void setupSessionIO(SessionState sessionState) {
           new FileOutputStream(sessionState.getTmpErrOutputFile()), true, StandardCharsets.UTF_8.name());
     } catch (IOException e) {
-      LOG.error("Error in creating temp output file ", e);
+      log.error("Error in creating temp output file", e);

       // Close file streams to avoid resource leaking
-      ServiceUtils.cleanup(LOG, parentSession.getSessionState().out, parentSession.getSessionState().err);
+      ServiceUtils.cleanup(log, parentSession.getSessionState().out, parentSession.getSessionState().err);

       try {
         sessionState.in = null;
@@ -90,7 +90,7 @@ private void setupSessionIO(SessionState sessionState) {
         sessionState.err = new SessionStream(System.err, true, StandardCharsets.UTF_8.name());
       } catch (UnsupportedEncodingException ee) {
-        LOG.error("Error creating PrintStream", e);
+        log.error("Error creating PrintStream", e);
         sessionState.out = null;
         sessionState.err = null;
       }
@@ -99,7 +99,7 @@ private void setupSessionIO(SessionState sessionState) {

   private void tearDownSessionIO() {
-    ServiceUtils.cleanup(LOG, parentSession.getSessionState().out, parentSession.getSessionState().err);
+    ServiceUtils.cleanup(log, parentSession.getSessionState().out, parentSession.getSessionState().err);
   }

   @Override
@@ -120,7 +120,7 @@ public void runInternal() throws HiveSQLException {
         resultSchema = new TableSchema();
       }
       if (response.getMessage() != null) {
-        LOG.info(response.getMessage());
+        log.info(response.getMessage());
       }
     } catch (CommandProcessorException e) {
       setState(OperationState.ERROR);
@@ -189,8 +189,7 @@ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws H
       try {
         resultReader = new BufferedReader(new FileReader(tmp));
       } catch (FileNotFoundException e) {
-        LOG.error("File " + tmp + " not found. ", e);
-        throw new HiveSQLException(e);
+        throw new HiveSQLException("File " + tmp + " not found", e);
       }
     }
     List results = new ArrayList();
@@ -205,8 +204,7 @@ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws H
           results.add(line);
         }
       } catch (IOException e) {
-        LOG.error("Reading temp results encountered an exception: ", e);
-        throw new HiveSQLException(e);
+        throw new HiveSQLException("Unable to read line from file", e);
       }
     }
     return results;
   }
@@ -221,7 +219,7 @@ private void cleanTmpFile() {

   private void resetResultReader() {
     if (resultReader != null) {
-      ServiceUtils.cleanup(LOG, resultReader);
+      ServiceUtils.cleanup(log, resultReader);
       resultReader = null;
     }
   }
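
The two getNextRowSet hunks above replace a log-and-rethrow with a single
wrapped exception, so the failure is reported once by whoever finally catches
the HiveSQLException instead of twice. A hedged sketch of that pattern, using
a stand-in exception type and method name rather than Hive's own:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileNotFoundException;
    import java.io.FileReader;

    public class WrapOnceSketch {
      // Stand-in for HiveSQLException.
      static class ServiceException extends Exception {
        ServiceException(String message, Throwable cause) {
          super(message, cause);
        }
      }

      static BufferedReader openResults(File tmp) throws ServiceException {
        try {
          return new BufferedReader(new FileReader(tmp));
        } catch (FileNotFoundException e) {
          // Fold the context into the wrapper instead of logging here and
          // rethrowing; the catch site that handles it logs exactly once.
          throw new ServiceException("File " + tmp + " not found", e);
        }
      }
    }
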
", e); - throw new HiveSQLException(e); + throw new HiveSQLException("File " + tmp + " not found", e); } } List results = new ArrayList(); @@ -205,8 +204,7 @@ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws H results.add(line); } } catch (IOException e) { - LOG.error("Reading temp results encountered an exception: ", e); - throw new HiveSQLException(e); + throw new HiveSQLException("Unable to read line from file", e); } } return results; @@ -221,7 +219,7 @@ private void cleanTmpFile() { private void resetResultReader() { if (resultReader != null) { - ServiceUtils.cleanup(LOG, resultReader); + ServiceUtils.cleanup(log, resultReader); resultReader = null; } } diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java index 3df8f6d21b..58f5492b65 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java @@ -61,7 +61,7 @@ private volatile MetricsScope currentStateScope; private final OperationHandle opHandle; public static final FetchOrientation DEFAULT_FETCH_ORIENTATION = FetchOrientation.FETCH_NEXT; - public static final Logger LOG = LoggerFactory.getLogger(Operation.class.getName()); + protected final Logger log = LoggerFactory.getLogger(getClass()); protected Boolean hasResultSet = null; protected volatile HiveSQLException operationException; protected volatile Future backgroundHandle; @@ -140,7 +140,7 @@ public OperationStatus getStatus() { try { taskStatus = getTaskStatus(); } catch (HiveSQLException sqlException) { - LOG.error("Error getting task status for " + opHandle.toString(), sqlException); + log.error("Error getting task status for {}", opHandle, sqlException); } return new OperationStatus(state, taskStatus, operationStart, operationComplete, hasResultSet, operationException); } @@ -240,7 +240,7 @@ protected void beforeRun() { createOperationLog(); LogUtils.registerLoggingContext(queryState.getConf()); - LOG.info( + log.info( "[opType={}, queryId={}, startTime={}, sessionId={}, createTime={}, userName={}, ipAddress={}]", opHandle.getOperationType(), queryState.getQueryId(), @@ -304,11 +304,11 @@ protected synchronized void cleanupOperationLog(final long operationLogCleanupDe LogUtils.stopQueryAppender(LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId); if (isOperationLogEnabled) { if (opHandle == null) { - LOG.warn("Operation seems to be in invalid state, opHandle is null"); + log.warn("Operation seems to be in invalid state, opHandle is null"); return; } if (operationLog == null) { - LOG.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] " + "logging is enabled, " + log.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] " + "logging is enabled, " + "but its OperationLog object cannot be found. 
" + "Perhaps the operation has already terminated."); } else { @@ -316,7 +316,7 @@ protected synchronized void cleanupOperationLog(final long operationLogCleanupDe scheduledExecutorService.schedule(new OperationLogCleaner(operationLog), operationLogCleanupDelayMs, TimeUnit.MILLISECONDS); } else { - LOG.info("Closing operation log {} without delay", operationLog); + log.info("Closing operation log {} without delay", operationLog); operationLog.close(); } } diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java index 25b6ab34a9..e4249d6e40 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java @@ -146,7 +146,7 @@ private void setupSessionIO(SessionState sessionState) { sessionState.err = new SessionStream(System.err, true, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException e) { - LOG.error("Error creating PrintStream", e); + log.error("Error creating PrintStream", e); sessionState.out = null; sessionState.info = null; sessionState.err = null; @@ -172,11 +172,11 @@ public void prepare(QueryState queryState) throws HiveSQLException { public void run() { try { String queryId = queryState.getQueryId(); - LOG.info("Query timed out after: " + queryTimeout + log.info("Query timed out after: " + queryTimeout + " seconds. Cancelling the execution now: " + queryId); SQLOperation.this.cancel(OperationState.TIMEDOUT); } catch (HiveSQLException e) { - LOG.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e); + log.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e); } finally { // Stop timeoutExecutor.shutdown(); @@ -220,7 +220,7 @@ private void runQuery() throws HiveSQLException { OperationState opState = getStatus().getState(); // Operation may have been cancelled by another thread if (opState.isTerminal()) { - LOG.info("Not running the query. Operation is already in terminal state: " + opState + log.info("Not running the query. Operation is already in terminal state: " + opState + ", perhaps cancelled due to query timeout or by another thread."); return; } @@ -238,7 +238,7 @@ private void runQuery() throws HiveSQLException { || (getStatus().getState() == OperationState.TIMEDOUT) || (getStatus().getState() == OperationState.CLOSED) || (getStatus().getState() == OperationState.FINISHED)) { - LOG.warn("Ignore exception in terminal state", e); + log.warn("Ignore exception in terminal state", e); return; } setState(OperationState.ERROR); @@ -329,7 +329,7 @@ public Object run() throws HiveSQLException { } catch (HiveSQLException e) { // TODO: why do we invent our own error path op top of the one from Future.get? 
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 25b6ab34a9..e4249d6e40 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -146,7 +146,7 @@ private void setupSessionIO(SessionState sessionState) {
       sessionState.err = new SessionStream(System.err, true, StandardCharsets.UTF_8.name());
     } catch (UnsupportedEncodingException e) {
-      LOG.error("Error creating PrintStream", e);
+      log.error("Error creating PrintStream", e);
       sessionState.out = null;
       sessionState.info = null;
       sessionState.err = null;
@@ -172,11 +172,11 @@ public void prepare(QueryState queryState) throws HiveSQLException {
       public void run() {
         try {
           String queryId = queryState.getQueryId();
-          LOG.info("Query timed out after: " + queryTimeout
+          log.info("Query timed out after: " + queryTimeout
               + " seconds. Cancelling the execution now: " + queryId);
           SQLOperation.this.cancel(OperationState.TIMEDOUT);
         } catch (HiveSQLException e) {
-          LOG.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
+          log.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
         } finally {
           // Stop
           timeoutExecutor.shutdown();
@@ -220,7 +220,7 @@ private void runQuery() throws HiveSQLException {
     OperationState opState = getStatus().getState();
     // Operation may have been cancelled by another thread
     if (opState.isTerminal()) {
-      LOG.info("Not running the query. Operation is already in terminal state: " + opState
+      log.info("Not running the query. Operation is already in terminal state: " + opState
           + ", perhaps cancelled due to query timeout or by another thread.");
       return;
     }
@@ -238,7 +238,7 @@ private void runQuery() throws HiveSQLException {
           || (getStatus().getState() == OperationState.TIMEDOUT)
           || (getStatus().getState() == OperationState.CLOSED)
           || (getStatus().getState() == OperationState.FINISHED)) {
-        LOG.warn("Ignore exception in terminal state", e);
+        log.warn("Ignore exception in terminal state", e);
        return;
      }
      setState(OperationState.ERROR);
@@ -329,7 +329,7 @@ public Object run() throws HiveSQLException {
           } catch (HiveSQLException e) {
             // TODO: why do we invent our own error path op top of the one from Future.get?
             setOperationException(e);
-            LOG.error("Error running hive query: ", e);
+            log.error("Error running hive query", e);
           } finally {
             LogUtils.unregisterLoggingContext();
@@ -346,7 +346,7 @@ public Object run() throws HiveSQLException {
         currentUGI.doAs(doAsAction);
       } catch (Exception e) {
         setOperationException(new HiveSQLException(e));
-        LOG.error("Error running hive query as user : " + currentUGI.getShortUserName(), e);
+        log.error("Error running hive query as user : " + currentUGI.getShortUserName(), e);
       } finally {
         /**
          * We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup
@@ -388,9 +388,9 @@ private synchronized void cleanup(OperationState state) throws HiveSQLException
         boolean success = backgroundHandle.cancel(true);
         String queryId = queryState.getQueryId();
         if (success) {
-          LOG.info("The running operation has been successfully interrupted: " + queryId);
+          log.info("The running operation has been successfully interrupted: " + queryId);
         } else if (state == OperationState.CANCELED) {
-          LOG.info("The running operation could not be cancelled, typically because it has already completed normally: " + queryId);
+          log.info("The running operation could not be cancelled, typically because it has already completed normally: " + queryId);
         }
       }
     }
@@ -403,7 +403,7 @@ private synchronized void cleanup(OperationState state) throws HiveSQLException
     SessionState ss = SessionState.get();
     if (ss == null) {
-      LOG.warn("Operation seems to be in invalid state, SessionState is null");
+      log.warn("Operation seems to be in invalid state, SessionState is null");
     } else {
       ss.deleteTmpOutputFile();
       ss.deleteTmpErrOutputFile();
     }
@@ -420,12 +420,12 @@ public void cancel(OperationState stateAfterCancel) throws HiveSQLException {
     String queryId = null;
     if (stateAfterCancel == OperationState.CANCELED) {
       queryId = queryState.getQueryId();
-      LOG.info("Cancelling the query execution: " + queryId);
+      log.info("Cancelling the query execution: " + queryId);
     }
     cleanup(stateAfterCancel);
     cleanupOperationLog(operationLogCleanupDelayMs);
     if (stateAfterCancel == OperationState.CANCELED) {
-      LOG.info("Successfully cancelled the query: " + queryId);
+      log.info("Successfully cancelled the query: " + queryId);
     }
   }
@@ -586,11 +586,11 @@ private AbstractSerDe getSerDe() throws SQLException {
       serde = new LazySimpleSerDe();
       Properties props = new Properties();
       if (names.length() > 0) {
-        LOG.debug("Column names: " + names);
+        log.debug("Column names: {}", names);
         props.setProperty(serdeConstants.LIST_COLUMNS, names);
       }
       if (types.length() > 0) {
-        LOG.debug("Column types: " + types);
+        log.debug("Column types: {}", types);
         props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types);
       }
       SerDeUtils.initializeSerDe(serde, queryState.getConf(), props, null);
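
For reference, the timeout hunk in SQLOperation follows a common single-shot
pattern: schedule one task that cancels the query after queryTimeout seconds,
and shut the executor down in finally so the timer thread does not outlive
the operation. A hedged sketch under that assumption; armTimeout and
cancelAction are stand-ins for the Hive originals such as
SQLOperation.this.cancel(OperationState.TIMEDOUT):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class TimeoutSketch {
      private final ScheduledExecutorService timeoutExecutor =
          Executors.newSingleThreadScheduledExecutor();

      void armTimeout(long queryTimeoutSeconds, Runnable cancelAction) {
        timeoutExecutor.schedule(() -> {
          try {
            cancelAction.run(); // e.g. cancel the query with a TIMEDOUT state
          } finally {
            timeoutExecutor.shutdown(); // stop the timer thread either way
          }
        }, queryTimeoutSeconds, TimeUnit.SECONDS);
      }
    }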