From 7a75718c439cc12f0a621756bb2d4150ddd6c247 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Tue, 31 Jul 2018 13:03:13 -0700
Subject: [PATCH] HBASE-20989 Minor, miscellaneous logging fixes

---
 .../hbase/client/AsyncRequestFutureImpl.java       | 31 ++++++++++++----------
 .../hadoop/hbase/procedure2/ProcedureExecutor.java |  2 +-
 .../AbstractStateMachineTableProcedure.java        |  2 +-
 .../master/procedure/DeleteTableProcedure.java     | 12 ++++-----
 .../master/procedure/DisableTableProcedure.java    |  8 +++---
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  2 +-
 6 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index c3cb866c21..ecd58477e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -952,19 +952,17 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
       Throwable error, long backOffTime, boolean willRetry, String startTime,
       int failed, int stopped) {
     StringBuilder sb = new StringBuilder();
-    sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName).append(", ")
-      .append("attempt=").append(numAttempt)
-      .append("/").append(asyncProcess.numTries).append(", ");
-
-    if (failureCount > 0 || error != null){
-      sb.append("failed=").append(failureCount).append("ops").append(", last exception=").
-          append(error == null ? "null" : error);
-    } else {
-      sb.append("succeeded");
+    sb.append("id=").append(asyncProcess.id).
+        append(", table=").
+        append(tableName).append(", ").
+        append("attempt=").append(numAttempt).
+        append("/").
+        append(asyncProcess.numTries).
+        append(", ").append(" on ").
+        append(sn);
+    if (startTime != null) {
+      sb.append(", tracking started ").append(startTime);
     }
-
-    sb.append(" on ").append(sn).append(", tracking started ").append(startTime);
-
     if (willRetry) {
       sb.append(", retrying after=").append(backOffTime).append("ms").
           append(", replay=").append(replaySize).append("ops");
@@ -973,9 +971,14 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
        sb.append("; not retrying ").append(stopped).append(" due to success from other replica");
      }
      if (failed > 0) {
-       sb.append("; not retrying ").append(failed).append(" - final failure");
+       sb.append("; not retrying, failed=").append(failed).append(" - final failure");
      }
-
+    }
+    if (failureCount > 0 || error != null) {
+      sb.append(", failureCount=").append(failureCount).append(" ops");
+      sb.append(", last exception=").append(error == null ? "null" : error);
+    } else {
+      sb.append(", succeeded");
     }
 
     return sb.toString();
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 16df77eae8..88a4f93fb2 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1456,7 +1456,7 @@ public class ProcedureExecutor {
   private void execProcedure(final RootProcedureState procStack,
       final Procedure procedure) {
     Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE,
-        procedure.toString());
+        "state=" + procedure.getState() + " " + procedure.toString());
 
     // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
     // The exception is caught below and then we hurry to the exit without disturbing state. The
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index 60436c21cc..6aa5bb3d68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -190,7 +190,7 @@ public abstract class AbstractStateMachineTableProcedure
       throw new UnknownRegionException("No RegionState found for " + ri.getEncodedName());
     }
     if (!rs.isOpened()) {
-      throw new DoNotRetryRegionException(ri.getEncodedName() + " is not OPEN");
+      throw new DoNotRetryRegionException(ri.getEncodedName() + " is not OPEN; regionState=" + rs);
     }
     if (ri.isSplitParent()) {
       throw new DoNotRetryRegionException(ri.getEncodedName() +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 487bb27f45..f8ebd44c77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -94,7 +94,7 @@ public class DeleteTableProcedure
           }
 
           // TODO: Move out... in the acquireLock()
-          LOG.debug("Waiting for '" + getTableName() + "' regions in transition");
+          LOG.debug("Waiting for RIT for {}", this);
           regions = env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName());
           assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
           ProcedureSyncWait.waitRegionInTransition(env, regions);
@@ -105,29 +105,29 @@ public class DeleteTableProcedure
           setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META);
           break;
         case DELETE_TABLE_REMOVE_FROM_META:
-          LOG.debug("delete '" + getTableName() + "' regions from META");
+          LOG.debug("Deleting regions from META for {}", this);
           DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
           setNextState(DeleteTableState.DELETE_TABLE_CLEAR_FS_LAYOUT);
           break;
         case DELETE_TABLE_CLEAR_FS_LAYOUT:
-          LOG.debug("delete '" + getTableName() + "' from filesystem");
+          LOG.debug("Deleting regions from filesystem for {}", this);
           DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
           setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE);
           regions = null;
           break;
         case DELETE_TABLE_UPDATE_DESC_CACHE:
-          LOG.debug("delete '" + getTableName() + "' descriptor");
+          LOG.debug("Deleting descriptor for {}", this);
           DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
           setNextState(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS);
           break;
         case DELETE_TABLE_UNASSIGN_REGIONS:
-          LOG.debug("delete '" + getTableName() + "' assignment state");
+          LOG.debug("Deleting assignment state for {}", this);
           DeleteTableProcedure.deleteAssignmentState(env, getTableName());
           setNextState(DeleteTableState.DELETE_TABLE_POST_OPERATION);
           break;
         case DELETE_TABLE_POST_OPERATION:
           postDelete(env);
-          LOG.debug("delete '" + getTableName() + "' completed");
+          LOG.debug("Finished {}", this);
           return Flow.NO_MORE_STATE;
         default:
           throw new UnsupportedOperationException("unhandled state=" + state);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 413e3ae3b4..2dc306b5bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -121,8 +121,7 @@ public class DisableTableProcedure
       if (isRollbackSupported(state)) {
         setFailure("master-disable-table", e);
       } else {
-        LOG.warn("Retriable error trying to disable table=" + tableName +
-          " (in state=" + state + ")", e);
+        LOG.warn("Retryable error in {}", this, e);
       }
     }
     return Flow.HAS_MORE_STATE;
@@ -236,7 +235,7 @@ public class DisableTableProcedure
     TableStateManager tsm = env.getMasterServices().getTableStateManager();
     TableState ts = tsm.getTableState(tableName);
     if (!ts.isEnabled()) {
-      LOG.info("Not ENABLED tableState=" + ts + "; skipping disable");
+      LOG.info("Not ENABLED; skipping {}", this);
       setFailure("master-disable-table", new TableNotEnabledException(ts.toString()));
       canTableBeDisabled = false;
     }
@@ -272,6 +271,7 @@ public class DisableTableProcedure
     env.getMasterServices().getTableStateManager().setTableState(
       tableName,
       TableState.State.DISABLING);
+    LOG.info("Set {} state={}", tableName, TableState.State.DISABLING);
   }
 
   /**
@@ -286,7 +286,7 @@ public class DisableTableProcedure
     env.getMasterServices().getTableStateManager().setTableState(
       tableName,
       TableState.State.DISABLED);
-    LOG.info("Disabled table, " + tableName + ", is completed.");
+    LOG.info("Set {} state={}", tableName, TableState.State.DISABLED);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 60b6555c57..ba75a2e092 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1505,7 +1505,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // the close flag?
       if (!abort && worthPreFlushing() && canFlush) {
         status.setStatus("Pre-flushing region before close");
-        LOG.info("Running close preflush of {}" + this.getRegionInfo().getEncodedName());
+        LOG.info("Running close preflush of {}", this.getRegionInfo().getEncodedName());
         try {
           internalFlushcache(status);
         } catch (IOException ioe) {
-- 
2.16.3
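
Reviewer note, not part of the patch to apply: every hunk above moves from eager string concatenation toward SLF4J parameterized logging. The sketch below is a minimal, self-contained illustration of that pattern; only the org.slf4j API calls are real, while the class name, table name, and values are hypothetical and exist solely for the example.

// Illustrative sketch only; not part of HBASE-20989. Names and values are hypothetical.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  public static void main(String[] args) {
    String tableName = "example_table"; // hypothetical table name
    int numAttempt = 3;
    int numTries = 10;

    // Old style: the message string is built eagerly, even when DEBUG is disabled.
    LOG.debug("delete '" + tableName + "' regions from META");

    // New style: '{}' placeholders are rendered only if the level is enabled,
    // and each placeholder should have a matching argument.
    LOG.debug("Deleting regions from META for {}", tableName);
    LOG.debug("attempt={}/{} on table={}", numAttempt, numTries, tableName);

    // A trailing Throwable with no matching placeholder is logged with its stack trace,
    // which is the form LOG.warn("Retryable error in {}", this, e) relies on above.
    try {
      throw new IllegalStateException("simulated failure");
    } catch (IllegalStateException e) {
      LOG.warn("Retryable error in {}", tableName, e);
    }
  }
}

The practical benefit is that argument formatting is deferred until the level check passes, so hot-path debug lines avoid concatenation cost while still carrying the identifying detail (procedure, table, state) the messages above add.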