diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index ff555f22ad..7efb4a574b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -572,7 +572,9 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements
     try {
       if (ZKUtil.setData(watcher, task, slt.toByteArray(), taskZKVersion)) {
         LOG.info("successfully transitioned task " + task + " to final state " + slt);
-        ctr.increment();
+        if (ctr != null) {
+          ctr.increment();
+        }
         return;
       }
       LOG.warn("failed to transistion task " + task + " to end state " + slt
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index a1c20306b3..d4549deca6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -96,6 +96,8 @@ public class SplitLogWorker implements Runnable {
       // TODO have to correctly figure out when log splitting has been
       // interrupted or has encountered a transient error and when it has
       // encountered a bad non-retry-able persistent error.
+      // Note: this can actually get the master stuck (HBASE-22289) so treat preempted as error.
+      // splitLogFile does return false for legitimate retriable errors.
       try {
         if (!WALSplitter.splitLogFile(walDir, fs.getFileStatus(new Path(walDir, filename)), fs, conf,
           p, sequenceIdChecker,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java
index 49ab574ec5..fea24cdcb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.handler;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.LongAdder;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -68,6 +69,7 @@ public class WALSplitterHandler extends EventHandler {
     Status status = null;
     try {
       status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), reporter);
+      boolean wasCounterIncremented = false;
       switch (status) {
         case DONE:
           coordination.endTask(new SplitLogTask.Done(this.serverName),
@@ -75,12 +77,17 @@
           break;
         case PREEMPTED:
           SplitLogCounters.tot_wkr_preempt_task.increment();
-          LOG.warn("task execution preempted " + splitTaskDetails.getWALFile());
-          break;
+          wasCounterIncremented = true;
+          // Preempted state can currently be returned either when task is preempted, or when there's
+          // a particular kind of error (e.g. some ZK/HDFS errors, in my observation). In the latter
+          // case, master-side split task will get stuck if we don't update the status.
+          // Treat preemption as error to be on the safe side.
+          LOG.warn("task execution preempted; treating as error " + splitTaskDetails.getWALFile());
+          //$FALL-THROUGH$
         case ERR:
           if (server != null && !server.isStopped()) {
-            coordination.endTask(new SplitLogTask.Err(this.serverName),
-              SplitLogCounters.tot_wkr_task_err, splitTaskDetails);
+            coordination.endTask(new SplitLogTask.Err(this.serverName), wasCounterIncremented
+              ? null : SplitLogCounters.tot_wkr_task_err, splitTaskDetails);
             break;
           }
           // if the RS is exiting then there is probably a tons of stuff
@@ -91,8 +98,8 @@
             LOG.info("task execution interrupted because worker is exiting "
                 + splitTaskDetails.toString());
           }
-          coordination.endTask(new SplitLogTask.Resigned(this.serverName),
-            SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails);
+          coordination.endTask(new SplitLogTask.Resigned(this.serverName), wasCounterIncremented
+            ? null : SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails);
           break;
       }
     } finally {
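For context (not part of the patch above): a minimal, self-contained Java sketch of the control flow this change introduces. PREEMPTED now falls through to ERR, and the counter handed to endTask() is null when the preempt counter was already bumped, which is why endTask() (in ZkSplitLogWorkerCoordination) now null-checks it. The class, field, and method names below (PreemptFallThroughSketch, totWkrTaskErr, handle, endTask) are simplified illustrative stand-ins, not the actual HBase APIs.

// Sketch, under the assumptions stated above; mirrors the patched logic in simplified form.
import java.util.concurrent.atomic.LongAdder;

public class PreemptFallThroughSketch {
  enum Status { DONE, PREEMPTED, ERR, RESIGNED }

  static final LongAdder totWkrPreemptTask = new LongAdder();
  static final LongAdder totWkrTaskErr = new LongAdder();

  // Simplified stand-in for the coordination endTask(): the counter argument may now be null.
  static void endTask(String finalState, LongAdder ctr) {
    System.out.println("transitioning task to " + finalState);
    if (ctr != null) {   // the null check added in ZkSplitLogWorkerCoordination.endTask
      ctr.increment();
    }
  }

  static void handle(Status status) {
    boolean wasCounterIncremented = false;
    switch (status) {
      case PREEMPTED:
        totWkrPreemptTask.increment();
        wasCounterIncremented = true;
        // fall through: treat preemption as an error so the master-side task is not left hanging
      case ERR:
        // suppress the error counter if the preempt counter was already incremented
        endTask("ERR", wasCounterIncremented ? null : totWkrTaskErr);
        break;
      default:
        endTask("DONE", null);
    }
  }

  public static void main(String[] args) {
    handle(Status.PREEMPTED);  // bumps only the preempt counter, but still ends the task as ERR
    handle(Status.ERR);        // bumps the error counter
    System.out.println("preempt=" + totWkrPreemptTask + " err=" + totWkrTaskErr);
  }
}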