From 28b7a1bfdc931c57c49f208ab247425ec93d47a5 Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Sat, 24 Feb 2018 13:01:02 -0800
Subject: [PATCH] HBASE-20069 fix existing findbugs errors in hbase-server

---
 .../hadoop/hbase/io/encoding/EncodedDataBlock.java |  4 +-
 .../org/apache/hadoop/hbase/nio/MultiByteBuff.java |  1 -
 .../hadoop/hbase/procedure2/ProcedureExecutor.java |  1 -
 .../hbase/procedure2/StateMachineProcedure.java    |  1 -
 .../apache/hadoop/hbase/ipc/NettyRpcServer.java    |  2 +
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |  2 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |  2 -
 .../hbase/master/assignment/AssignmentManager.java |  1 -
 .../assignment/SplitTableRegionProcedure.java      |  7 ++-
 .../hadoop/hbase/master/cleaner/CleanerChore.java  | 40 +++++++------
 .../hbase/regionserver/CompactingMemStore.java     |  4 ++
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  4 +-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java | 67 ++++++++++++++++------
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  1 -
 .../hbase/regionserver/RegionCoprocessorHost.java  |  2 +
 .../hadoop/hbase/regionserver/wal/AsyncFSWAL.java  |  6 +-
 .../hbase/util/compaction/MajorCompactor.java      |  5 +-
 17 files changed, 98 insertions(+), 52 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index a791c09f37..94a459d5fb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -257,7 +257,9 @@ public class EncodedDataBlock {
       }
       BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
       baos.writeTo(stream);
-      this.dataBlockEncoder.endBlockEncoding(encodingCtx, out, stream.ourBytes);
+      synchronized(stream) {
+        this.dataBlockEncoder.endBlockEncoding(encodingCtx, out, stream.ourBytes);
+      }
     } catch (IOException e) {
       throw new RuntimeException(String.format(
           "Bug in encoding part of algorithm %s. " +
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index fecf012aab..5f45c10038 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -285,7 +285,6 @@ public class MultiByteBuff extends ByteBuff {
       // means cur item is the last one and we wont be able to read a int. Throw exception
       throw new BufferUnderflowException();
     }
-    ByteBuffer nextItem = items[itemIndex + 1];
     // Get available bytes from this item and remaining from next
     short l = 0;
    for (int i = offsetInItem; i < item.capacity(); i++) {
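
Note on the EncodedDataBlock hunk above: FindBugs raises IS2_INCONSISTENT_SYNC when a
field is written while a monitor is held but read without it. Here ourBytes is assigned
inside the grabbing stream's synchronized write(byte[], int, int) override, so the fix
reads it under the same monitor. A minimal standalone sketch of the pattern, with a
hypothetical class name rather than the HBase inner class:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    class BufferGrabbingStream extends ByteArrayOutputStream {
      private byte[] ourBytes;

      @Override
      public synchronized void write(byte[] b, int off, int len) {
        this.ourBytes = b; // written while holding this stream's monitor
      }

      static byte[] grab(ByteArrayOutputStream baos) throws IOException {
        BufferGrabbingStream stream = new BufferGrabbingStream();
        baos.writeTo(stream);   // invokes the synchronized write above
        synchronized (stream) { // read under the same monitor; satisfies IS2
          return stream.ourBytes;
        }
      }
    }
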
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 665d22319a..19efdc79c0 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -206,7 +206,6 @@ public class ProcedureExecutor<TEnvironment> {
     final long now = EnvironmentEdgeManager.currentTime();
     final Iterator<Map.Entry<Long, CompletedProcedureRetainer>> it = completed.entrySet().iterator();
-    final boolean debugEnabled = LOG.isDebugEnabled();
     while (it.hasNext() && store.isRunning()) {
       final Map.Entry<Long, CompletedProcedureRetainer> entry = it.next();
       final CompletedProcedureRetainer retainer = entry.getValue();
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
index c530386d6e..0880238b10 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
@@ -209,7 +209,6 @@ public abstract class StateMachineProcedure<TEnvironment, TState>
   @Override
   protected boolean abort(final TEnvironment env) {
-    final TState state = getCurrentState();
     LOG.debug("Abort requested for {}", this);
     if (hasMoreState()) {
       aborted.set(true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
index 47826ae736..d6b1c8d0af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
@@ -120,6 +120,8 @@
   }
 
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+    justification="Start is synchronized so authManager creation is single-threaded")
   public synchronized void start() {
     if (started) {
       return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 13a3cf7171..6035649aee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -418,6 +418,8 @@
 
   /** Starts the service. Must be called before any calls will be handled. */
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+    justification="Start is synchronized so authManager creation is single-threaded")
   public synchronized void start() {
     if (started) return;
     authTokenSecretMgr = createSecretManager();
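
Note on the two RPC-server hunks above: rather than restructuring the locking, they
suppress the warning with the FindBugs annotation, which takes a pattern name plus a
justification and is retained in class files (visible to the analyzer, no runtime
cost). A sketch of the idiom with a hypothetical class and a placeholder field type:

    import edu.umd.cs.findbugs.annotations.SuppressWarnings;

    class TokenRpcServer {
      private Object authTokenSecretMgr; // also read on unsynchronized paths

      @SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
          justification = "Start is synchronized so creation is single-threaded")
      public synchronized void start() {
        if (authTokenSecretMgr == null) {
          authTokenSecretMgr = new Object(); // stand-in for createSecretManager()
        }
      }
    }
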
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8e2aa32b30..7bffa0595f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1207,8 +1207,6 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   private void startProcedureExecutor() throws IOException {
     final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
-    final Path rootDir = FSUtils.getRootDir(conf);
-
     procedureStore = new WALProcedureStore(conf,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
     procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index ff65f46b20..1292c5e444 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1302,7 +1302,6 @@ public class AssignmentManager implements ServerListener {
     final Set<ServerName> offlineServersWithOnlineRegions = new HashSet<>();
     int size = regionStates.getRegionStateNodes().size();
     final List<RegionInfo> offlineRegionsToAssign = new ArrayList<>(size);
-    long startTime = System.currentTimeMillis();
     // If deadservers then its a failover, else, we are not sure yet.
     boolean failover = deadServers;
     for (RegionStateNode regionNode: regionStates.getRegionStateNodes()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 46ec149f3f..cabccbc320 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -432,6 +432,10 @@ public class SplitTableRegionProcedure
     }
 
     RegionInfo parentHRI = node.getRegionInfo();
+    if (parentHRI == null) {
+      LOG.info("Unsplittable; parent region is null; node={}", node);
+      return false;
+    }
     // Lookup the parent HRI state from the AM, which has the latest updated info.
     // Protect against the case where concurrent SPLIT requests came in and succeeded
     // just before us.
@@ -457,8 +461,7 @@
     // we are always able to split the region
     if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
       LOG.warn("pid=" + getProcId() + " split switch is off! skip split of " + parentHRI);
-      setFailure(new IOException("Split region " +
-        (parentHRI == null? "null": parentHRI.getRegionNameAsString()) +
+      setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString() +
         " failed due to split switch off"));
       return false;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 9ef7dce326..bf4bff8ce2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -50,6 +50,8 @@ import org.slf4j.LoggerFactory;
  * Abstract Cleaner that uses a chain of delegates to clean a directory of files
 * @param <T> Cleaner delegate class that is dynamically loaded from configuration
  */
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
+  justification="TODO: Fix. It is wonky to have the static pool initialized from an instance")
 public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
     implements ConfigurationObserver {
 
@@ -67,8 +69,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
   // It may be waste resources for each cleaner chore own its pool,
   // so let's make pool for all cleaner chores.
-  private static volatile ForkJoinPool chorePool;
-  private static volatile int chorePoolSize;
+  // TODO: FIX WONKY HAVING STATIC INITIALIZED FROM INSTANCE
+  private static volatile ForkJoinPool CHOREPOOL;
+  private static volatile int CHOREPOOLSIZE;
 
   protected final FileSystem fs;
   private final Path oldFileDir;
@@ -102,15 +105,14 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     this.params = params;
     initCleanerChain(confKey);
 
-    if (chorePool == null) {
+    if (CHOREPOOL == null) {
       String poolSize = conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE);
-      chorePoolSize = calculatePoolSize(poolSize);
+      CHOREPOOLSIZE = calculatePoolSize(poolSize);
       // poolSize may be 0 or 0.0 from a careless configuration,
       // double check to make sure.
-      chorePoolSize = chorePoolSize == 0 ?
-        calculatePoolSize(DEFAULT_CHORE_POOL_SIZE) : chorePoolSize;
-      this.chorePool = new ForkJoinPool(chorePoolSize);
-      LOG.info("Cleaner pool size is {}", chorePoolSize);
+      CHOREPOOLSIZE = CHOREPOOLSIZE == 0? calculatePoolSize(DEFAULT_CHORE_POOL_SIZE): CHOREPOOLSIZE;
+      this.CHOREPOOL = new ForkJoinPool(CHOREPOOLSIZE);
+      LOG.info("Cleaner pool size is {}", CHOREPOOLSIZE);
     }
   }
 
@@ -119,11 +121,11 @@
    * @param poolSize size from configuration
    * @return size of pool after calculation
    */
-  int calculatePoolSize(String poolSize) {
+  static int calculatePoolSize(String poolSize) {
     if (poolSize.matches("[1-9][0-9]*")) {
       // If poolSize is an integer, return it directly,
       // but upmost to the number of available processors.
-      int size = Math.min(Integer.valueOf(poolSize), AVAIL_PROCESSORS);
+      int size = Math.min(Integer.parseInt(poolSize), AVAIL_PROCESSORS);
       if (size == AVAIL_PROCESSORS) {
         LOG.warn("Use full core processors to scan dir, size={}", size);
       }
@@ -173,12 +175,12 @@
   @Override
   public void onConfigurationChange(Configuration conf) {
     int updatedSize = calculatePoolSize(conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE));
-    if (updatedSize == chorePoolSize) {
+    if (updatedSize == CHOREPOOLSIZE) {
       LOG.trace("Size from configuration is same as previous={}, no need to update.", updatedSize);
       return;
     }
-    chorePoolSize = updatedSize;
-    if (chorePool.getPoolSize() == 0) {
+    CHOREPOOLSIZE = updatedSize;
+    if (CHOREPOOL.getPoolSize() == 0) {
       // Chore does not work now, update it directly.
       updateChorePoolSize(updatedSize);
       return;
@@ -188,9 +190,9 @@
   }
 
   private void updateChorePoolSize(int updatedSize) {
-    chorePool.shutdownNow();
-    LOG.info("Update chore's pool size from {} to {}", chorePool.getParallelism(), updatedSize);
-    chorePool = new ForkJoinPool(updatedSize);
+    CHOREPOOL.shutdownNow();
+    LOG.info("Update chore's pool size from {} to {}", CHOREPOOL.getParallelism(), updatedSize);
+    CHOREPOOL = new ForkJoinPool(updatedSize);
   }
 
   /**
@@ -226,7 +228,7 @@
       }
       // After each clean chore, checks if receives reconfigure notification while cleaning
       if (reconfig.compareAndSet(true, false)) {
-        updateChorePoolSize(chorePoolSize);
+        updateChorePoolSize(CHOREPOOLSIZE);
       }
     } else {
       LOG.debug("Cleaner chore disabled! Not cleaning.");
@@ -240,7 +242,7 @@
   public Boolean runCleaner() {
     preRunCleaner();
     CleanerTask task = new CleanerTask(this.oldFileDir, true);
-    chorePool.submit(task);
+    CHOREPOOL.submit(task);
     return task.join();
   }
 
@@ -372,7 +374,7 @@
 
   @VisibleForTesting
   int getChorePoolSize() {
-    return chorePoolSize;
+    return CHOREPOOLSIZE;
  }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index bcecdc7207..e16da04ba7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -136,13 +136,17 @@ public class CompactingMemStore extends AbstractMemStore {
     }
     inmemoryFlushSize = memstoreFlushSize / numStores;
     // multiply by a factor (different factors for different index types)
+    /** BUG SAME STATEMENT EITHER SIDE OF IF; HBASE-20074
     if (indexType == IndexType.ARRAY_MAP) {
       factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
           IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
     } else {
+    */
     factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
         IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
+    /* BUG
     }
+    */
     inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
     LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize
         + " and immutable segments index to be of type " + indexType);
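
Note on the CompactingMemStore hunk above: both arms of the original if/else computed
exactly the same factor, which FindBugs reports as duplicated conditional branches, so
the patch comments the conditional out and leaves HBASE-20074 to restore the intended
per-index-type factor. The shape of the defect, reduced to a sketch:

    class FlushFactor {
      // Before: the branch on index type was useless because both arms matched.
      static double before(boolean arrayMapIndex, double configured) {
        double factor;
        if (arrayMapIndex) {
          factor = configured;
        } else {
          factor = configured; // identical to the then-branch
        }
        return factor;
      }

      // After: a single statement, identical behavior, no findbugs complaint.
      static double after(double configured) {
        return configured;
      }
    }
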
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 79ff3a7821..93cce48bde 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -8112,8 +8112,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   @Override
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH",
-    justification="Intentional")
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+    value="SF_SWITCH_FALLTHROUGH,SF_SWITCH_NO_DEFAULT", justification="Intentional")
   public void startRegionOperation(Operation op) throws IOException {
     switch (op) {
       case GET: // read operations
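
Note on the HRegion hunk above: the switch gates region operations, read cases
deliberately fall through to shared checks, and there is intentionally no default arm,
so the suppression is widened to cover both SF_SWITCH_FALLTHROUGH and
SF_SWITCH_NO_DEFAULT. A sketch of the idiom with a hypothetical enum and check methods:

    import edu.umd.cs.findbugs.annotations.SuppressWarnings;

    class OperationGate {
      enum Op { GET, SCAN, PUT }

      @SuppressWarnings(value = {"SF_SWITCH_FALLTHROUGH", "SF_SWITCH_NO_DEFAULT"},
          justification = "Intentional")
      void startOperation(Op op) {
        switch (op) {
          case GET:  // read operations share the scan checks: fall through
          case SCAN:
            checkReadsEnabled();
            break;
          case PUT:
            checkResources();
            break;
          // intentionally no default: other ops need no gating here
        }
      }

      private void checkReadsEnabled() { }
      private void checkResources() { }
    }
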
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 6e4191e7a3..a4feeab534 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -147,17 +147,18 @@
 
     boolean flushedOne = false;
     while (!flushedOne) {
-      // Find the biggest region that doesn't have too many storefiles
-      // (might be null!)
-      HRegion bestFlushableRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
+      // Find the biggest region that doesn't have too many storefiles (might be null!)
+      HRegion bestFlushableRegion =
+          getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
       // Find the biggest region, total, even if it might have too many flushes.
-      HRegion bestAnyRegion = getBiggestMemStoreRegion(
-          regionsBySize, excludedRegions, false);
+      HRegion bestAnyRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, false);
       // Find the biggest region that is a secondary region
-      HRegion bestRegionReplica = getBiggestMemStoreOfRegionReplica(regionsBySize,
-          excludedRegions);
-
-      if (bestAnyRegion == null && bestRegionReplica == null) {
+      HRegion bestRegionReplica = getBiggestMemStoreOfRegionReplica(regionsBySize, excludedRegions);
+      if (bestAnyRegion == null) {
+        // If bestAnyRegion is null, assign replica. It may be null too. Next step is check for null
+        bestAnyRegion = bestRegionReplica;
+      }
+      if (bestAnyRegion == null) {
         LOG.error("Above memory mark but there are no flushable regions!");
         return false;
       }
@@ -169,19 +170,20 @@
         case ABOVE_OFFHEAP_HIGHER_MARK:
         case ABOVE_OFFHEAP_LOWER_MARK:
           bestAnyRegionSize = bestAnyRegion.getMemStoreOffHeapSize();
-          bestFlushableRegionSize = bestFlushableRegion.getMemStoreOffHeapSize();
+          bestFlushableRegionSize = getMemStoreOffHeapSize(bestFlushableRegion);
           break;
+
         case ABOVE_ONHEAP_HIGHER_MARK:
         case ABOVE_ONHEAP_LOWER_MARK:
           bestAnyRegionSize = bestAnyRegion.getMemStoreHeapSize();
-          bestFlushableRegionSize = bestFlushableRegion.getMemStoreHeapSize();
+          bestFlushableRegionSize = getMemStoreHeapSize(bestFlushableRegion);
           break;
+
         default:
           bestAnyRegionSize = bestAnyRegion.getMemStoreDataSize();
-          bestFlushableRegionSize = bestFlushableRegion.getMemStoreDataSize();
+          bestFlushableRegionSize = getMemStoreDataSize(bestFlushableRegion);
       }
-      if (bestFlushableRegion != null &&
-          bestAnyRegionSize > 2 * bestFlushableRegionSize) {
+      if (bestAnyRegionSize > 2 * bestFlushableRegionSize) {
         // Even if it's not supposed to be flushed, pick a region if it's more than twice
         // as big as the best flushable one - otherwise when we're under pressure we make
         // lots of little flushes and cause lots of compactions, etc, which just makes
@@ -211,21 +213,22 @@
         case ABOVE_OFFHEAP_HIGHER_MARK:
         case ABOVE_OFFHEAP_LOWER_MARK:
           regionToFlushSize = regionToFlush.getMemStoreOffHeapSize();
-          bestRegionReplicaSize = bestRegionReplica.getMemStoreOffHeapSize();
+          bestRegionReplicaSize = getMemStoreOffHeapSize(bestRegionReplica);
           break;
+
         case ABOVE_ONHEAP_HIGHER_MARK:
         case ABOVE_ONHEAP_LOWER_MARK:
           regionToFlushSize = regionToFlush.getMemStoreHeapSize();
-          bestRegionReplicaSize = bestRegionReplica.getMemStoreHeapSize();
+          bestRegionReplicaSize = getMemStoreHeapSize(bestRegionReplica);
           break;
+
         default:
           regionToFlushSize = regionToFlush.getMemStoreDataSize();
-          bestRegionReplicaSize = bestRegionReplica.getMemStoreDataSize();
+          bestRegionReplicaSize = getMemStoreDataSize(bestRegionReplica);
       }
 
       Preconditions.checkState(
-          (regionToFlush != null && regionToFlushSize > 0) ||
-          (bestRegionReplica != null && bestRegionReplicaSize > 0));
+          (regionToFlush != null && regionToFlushSize > 0) || bestRegionReplicaSize > 0);
 
       if (regionToFlush == null ||
           (bestRegionReplica != null &&
@@ -266,6 +269,27 @@
     return true;
   }
 
+  /**
+   * @return Return memstore offheap size or 0 if r is null
+   */
+  private static long getMemStoreOffHeapSize(HRegion r) {
+    return r == null? 0: r.getMemStoreOffHeapSize();
+  }
+
+  /**
+   * @return Return memstore heap size or 0 if r is null
+   */
+  private static long getMemStoreHeapSize(HRegion r) {
+    return r == null? 0: r.getMemStoreHeapSize();
+  }
+
+  /**
+   * @return Return memstore data size or 0 if r is null
+   */
+  private static long getMemStoreDataSize(HRegion r) {
+    return r == null? 0: r.getMemStoreDataSize();
+  }
+
   private class FlushHandler extends HasThread {
 
     private FlushHandler(String name) {
@@ -772,6 +796,11 @@
     public int compareTo(Delayed o) {
       return -1;
     }
+
+    @Override
+    public boolean equals(Object obj) {
+      return obj instanceof Delayed && compareTo((Delayed)obj) == 0;
+    }
   }
 
   /**
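
Note on the MemStoreFlusher hunks above: the new static helpers fold the null check
into the size lookup, returning 0 for a null region, so the comparison logic no longer
needs a `bestFlushableRegion != null &&` guard on every path that FindBugs saw as a
possible null dereference. A reduced sketch with a stand-in region type:

    class FlushSizing {
      interface Region { long getMemStoreHeapSize(); } // stand-in for HRegion

      private static long getMemStoreHeapSize(Region r) {
        return r == null ? 0 : r.getMemStoreHeapSize();
      }

      // A null `flushable` now simply loses the comparison instead of NPEing.
      static Region pick(Region any, Region flushable) {
        return getMemStoreHeapSize(any) > 2 * getMemStoreHeapSize(flushable)
            ? any : flushable;
      }
    }
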
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 33ee548d3d..42284e9c07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1206,7 +1206,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       throw new IllegalArgumentException("Failed resolve of " + initialIsa);
     }
     priority = createPriority();
-    String hostname = initialIsa.getHostName();
     // Using Address means we don't get the IP too. Shorten it more even to just the host name
     // w/o the domain.
     String name = rs.getProcessName() + "/" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 5ef579bec4..f3c93dce67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -1206,6 +1206,8 @@ public class RegionCoprocessorHost
    * @return true or false to return to client if default processing should be bypassed,
    *   or null otherwise
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_BOOLEAN_RETURN_NULL",
+    justification="TODO: Fix")
   public Boolean preCheckAndDeleteAfterRowLock(final byte[] row, final byte[] family,
       final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator,
       final Delete delete) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d22d1ec6fe..cae88796f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -654,8 +654,12 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     waitForSafePoint();
     long oldFileLen = closeWriter();
     logRollAndSetupWalProps(oldPath, newPath, oldFileLen);
+    // Check for null.
+    if (nextWriter == null) {
+      throw new NullPointerException("nextWriter");
+    }
     this.writer = nextWriter;
-    if (nextWriter != null && nextWriter instanceof AsyncProtobufLogWriter) {
+    if (nextWriter instanceof AsyncProtobufLogWriter) {
       this.fsOut = ((AsyncProtobufLogWriter) nextWriter).getOutput();
     }
     this.fileLengthAtLastSync = nextWriter.getLength();
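
Note on the AsyncFSWAL hunk above: `instanceof` is already false for null, so the old
`nextWriter != null && nextWriter instanceof ...` guard was redundant, while the
trailing nextWriter.getLength() call was still a possible null dereference. Failing
fast up front resolves both findbugs complaints at once. A reduced sketch with
stand-in writer types:

    class WriterSwap {
      interface Writer { long getLength(); }
      interface ProtobufWriter extends Writer { Object getOutput(); }

      private Object fsOut;

      long install(Writer nextWriter) {
        if (nextWriter == null) { // fail fast; getLength() below would NPE anyway
          throw new NullPointerException("nextWriter");
        }
        if (nextWriter instanceof ProtobufWriter) { // instanceof is false for null
          this.fsOut = ((ProtobufWriter) nextWriter).getOutput();
        }
        return nextWriter.getLength(); // safe: guaranteed non-null above
      }
    }
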
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
index c3372bb3d8..49624570dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
@@ -331,6 +331,9 @@
     CommandLine commandLine = null;
     try {
       commandLine = cmdLineParser.parse(options, args);
+      if (commandLine == null) {
+        throw new ParseException("commandLine is null");
+      }
     } catch (ParseException parseException) {
       System.out.println(
           "ERROR: Unable to parse command-line arguments " + Arrays.toString(args) + " due to: "
@@ -353,7 +356,7 @@
     String quorum =
         commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM));
     String rootDir =
         commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR));
-    long sleep = Long.valueOf(commandLine.getOptionValue("sleep", Long.toString(30000)));
+    long sleep = Long.parseLong(commandLine.getOptionValue("sleep", Long.toString(30000)));
     configuration.set(HConstants.HBASE_DIR, rootDir);
     configuration.set(HConstants.ZOOKEEPER_QUORUM, quorum);
-- 
2.11.0 (Apple Git-81)
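
Postscript on the MemStoreFlusher equals() addition: defining equals() in terms of
compareTo() keeps the two consistent (the consistency FindBugs checks with
EQ_COMPARETO_USE_OBJECT_EQUALS), but an equals() override is normally paired with a
hashCode() override (HE_EQUALS_NO_HASHCODE). A sketch of the complete pairing for a
wakeup-style queue entry; the constant hash is an assumption for illustration, not
the HBase code:

    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    class WakeupEntry implements Delayed {
      @Override
      public long getDelay(TimeUnit unit) {
        return 0; // always ready, as a wakeup marker should be
      }

      @Override
      public int compareTo(Delayed o) {
        return -1; // always sorts first, mirroring the patch
      }

      @Override
      public boolean equals(Object obj) {
        return obj instanceof Delayed && compareTo((Delayed) obj) == 0;
      }

      @Override
      public int hashCode() {
        return 42; // any constant is consistent with the equals() above
      }
    }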