diff --git dev-support/findHangingTest.sh dev-support/findHangingTest.sh
index 4518c68..f7ebe47 100755
--- dev-support/findHangingTest.sh
+++ dev-support/findHangingTest.sh
@@ -38,4 +38,3 @@ cat jenkins.out | while read line; do
   prevLine=$line
  fi
 done
-rm jenkins.out
diff --git dev-support/test-patch.properties dev-support/test-patch.properties
index 203df69..7b5c2fa 100644
--- dev-support/test-patch.properties
+++ dev-support/test-patch.properties
@@ -19,5 +19,5 @@
 MAVEN_OPTS="-Xmx3g"
 # Please update the per-module test-patch.properties if you update this file.
 OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=585
+OK_FINDBUGS_WARNINGS=571
 OK_JAVADOC_WARNINGS=169
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index c4a8a9e..8e78a60 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -558,9 +558,7 @@ public class HFile {
       hfs = (HFileSystem)fs;
       // open a stream to read data without checksum verification in
       // the filesystem
-      if (hfs != null) {
-        fsdisNoFsChecksum = hfs.getNoChecksumFs().open(path);
-      }
+      fsdisNoFsChecksum = hfs.getNoChecksumFs().open(path);
     }
     return pickReaderVersion(path, fsdis, fsdisNoFsChecksum,
         fs.getFileStatus(path).getLen(), closeIStream, cacheConf,
diff --git src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index 63b2ed5..6f33785 100644
--- src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -128,7 +128,8 @@ public class CellCounter {
           context.write(new Text(thisRowFamilyName), new IntWritable(1));
         }
         String thisRowQualifierName =
-            thisRowFamilyName + separator + Bytes.toStringBinary(value.getQualifier());
+            thisRowFamilyName + separator
+            + Bytes.toStringBinary(value.getQualifier());
         if (thisRowQualifierName != null
             && !thisRowQualifierName.equals(currentQualifierName)) {
           currentQualifierName = thisRowQualifierName;
@@ -139,15 +140,15 @@ public class CellCounter {
           // Intialize versions
           context.getCounter("QL_VERSIONS", currentRowKey + separator +
               thisRowQualifierName).increment(1);
-          context.write(new Text(currentRowKey + separator + thisRowQualifierName +
-              "_Versions"), new IntWritable(1));
-
+          context.write(new Text(currentRowKey + separator
+              + thisRowQualifierName + "_Versions"), new IntWritable(1));
         } else {
           // Increment versions
           currentQualifierName = thisRowQualifierName;
           context.getCounter("QL_VERSIONS", currentRowKey + separator +
               thisRowQualifierName).increment(1);
-          context.write(new Text(currentRowKey + separator + thisRowQualifierName + "_Versions"),
+          context.write(new Text(currentRowKey + separator
+              + thisRowQualifierName + "_Versions"),
               new IntWritable(1));
         }
       }
diff --git src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index de4b1e6..ab33ac7 100644
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -927,7 +927,7 @@ public class AssignmentManager extends ZooKeeperListener {
       return p.getFirst();
     } catch (IOException e) {
       master.abort("Aborting because error occoured while reading "
-          + data.getRegionName() + " from .META.", e);
+          + Bytes.toStringBinary(data.getRegionName()) + " from .META.", e);
       return null;
     }
   }
@@ -1756,7 +1756,6 @@ public class AssignmentManager extends ZooKeeperListener {
   boolean asyncSetOfflineInZooKeeper(final RegionState state,
       final AsyncCallback.StringCallback cb, final Object ctx) {
     if (!state.isClosed() && !state.isOffline()) {
-      new RuntimeException("Unexpected state trying to OFFLINE; " + state);
       this.master.abort("Unexpected state trying to OFFLINE; " + state,
           new IllegalStateException());
       return false;
diff --git src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
index 1676832..4ebd7f6 100644
--- src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
+++ src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
@@ -692,7 +692,7 @@ public class DefaultLoadBalancer implements LoadBalancer {
       }
     } catch (FileNotFoundException fnfe) {
       LOG.debug("FileNotFoundException during getTableDescriptors." +
-          " Current table name = " + tableName , fnfe);
+          " Current table name = " + Bytes.toStringBinary(tableName) , fnfe);
     }

     return tableDescriptor;
diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fb21bdd..5ae4706 100644
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -255,7 +255,7 @@ Server {
     // Creation of a HSA will force a resolve.
     InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
     if (initialIsa.getAddress() == null) {
-      throw new IllegalArgumentException("Failed resolve of " + this.isa);
+      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
     }
     int numHandlers = conf.getInt("hbase.master.handler.count",
       conf.getInt("hbase.regionserver.handler.count", 25));
diff --git src/main/java/org/apache/hadoop/hbase/master/ServerManager.java src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 1c253a0..b5f4cb6 100644
--- src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -533,7 +533,7 @@ public class ServerManager {
    */
  private HRegionInterface getServerConnection(final ServerName sn)
  throws IOException {
-    HRegionInterface hri = this.serverConnections.get(sn.toString());
+    HRegionInterface hri = this.serverConnections.get(sn);
     if (hri == null) {
       LOG.debug("New connection to " + sn.toString());
       hri = this.connection.getHRegionConnection(sn.getHostname(), sn.getPort());
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
index 2694897..cfaff80 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
@@ -259,8 +259,8 @@ public class MemStoreLAB {

     @Override
     public String toString() {
-      return "Allocation(data=" + data +
-        " with capacity=" + data.length +
+      return "Allocation(" +
+        "capacity=" + data.length +
         ", off=" + offset + ")";
     }

diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
index 00f8d99..f0c7437 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
@@ -176,7 +176,9 @@ public class ShutdownHook {
     Field field = null;
     try {
-      field = cache.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
+      if (cache != null) {
+        field = cache.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
+      }
     } catch (NoSuchFieldException e) {
       // We can get here if the Cache class does not have a clientFinalizer
       // instance: i.e. we're running on straight 0.20 w/o hadoop-4829.
     }
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index bf1618e..36d9ec0 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -510,6 +510,10 @@ public class Store extends SchemaConfigured implements HeapSize {
         Bytes.toStringBinary(region.getStartKey()) +
         " last=" + Bytes.toStringBinary(region.getEndKey()));

+    if (firstKey == null || lastKey == null) {
+      throw new InvalidHFileException("Hfile has no entries");
+    }
+
     HRegionInfo hri = region.getRegionInfo();
     if (!hri.containsRange(firstKey, lastKey)) {
       throw new WrongRegionException(
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 1636bfb..c33e951 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -963,7 +963,7 @@ public class StoreFile extends SchemaConfigured {
         .withPath(fs, path)
         .withBlockSize(blocksize)
         .withCompression(compress)
-        .withDataBlockEncoder(dataBlockEncoder)
+        .withDataBlockEncoder(this.dataBlockEncoder)
         .withComparator(comparator.getRawComparator())
         .withChecksumType(checksumType)
         .withBytesPerChecksum(bytesPerChecksum)
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
index 698bb3d..baf2bbc 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -757,7 +757,7 @@ public class HLog implements Syncable {
     // If too many log files, figure which regions we need to flush.
     // Array is an array of encoded region names.
     byte [][] regions = null;
-    int logCount = this.outputfiles == null? 0: this.outputfiles.size();
+    int logCount = this.outputfiles.size();
     if (logCount > this.maxLogs && logCount > 0) {
       // This is an array of encoded region names.
       regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
@@ -1881,4 +1881,4 @@ public class HLog implements Syncable {
       System.exit(-1);
     }
   }
-}
\ No newline at end of file
+}
diff --git src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 545bd02..04fe8b6 100644
--- src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -791,11 +791,6 @@ public class ReplicationSource extends Thread
       return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
     }

-    @Override
-    public boolean equals(Object o) {
-      return true;
-    }
-
     /**
      * Split a path to get the start time
      * For example: 10.20.20.171%3A60020.1277499063250
diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 62cf6ac..efb2b84 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -273,7 +273,7 @@ public class FSTableDescriptors implements TableDescriptors {
       Path p = status[i].getPath();
       // Clean up old versions
       if (!fs.delete(p, false)) {
-        LOG.warn("Failed cleanup of " + status);
+        LOG.warn("Failed cleanup of " + p);
       } else {
         LOG.debug("Cleaned up old tableinfo file " + p);
       }
diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index aebe5b0..b9c47fc 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -832,7 +832,7 @@ public abstract class FSUtils {
     public boolean accept(Path p) {
       boolean isValid = false;
       try {
-        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) {
+        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.toString())) {
          isValid = false;
        } else {
          isValid = this.fs.getFileStatus(p).isDir();
diff --git src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index c21377c..352f332 100644
--- src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -231,8 +231,7 @@ public class JVMClusterUtil {
         }
       }
     }
-    // regionServerThreads can never be null because they are initialized when
-    // the class is constructed.
+    if (regionservers != null) {
     for(RegionServerThread t: regionservers) {
       if (t.isAlive()) {
         try {
@@ -243,6 +242,7 @@ public class JVMClusterUtil {
         }
       }
     }
+    }
     if (masters != null) {
       for (JVMClusterUtil.MasterThread t : masters) {
         while (t.master.isAlive()) {
diff --git src/main/java/org/apache/hadoop/hbase/util/PoolMap.java src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
index 8e2a856..7caf796 100644
--- src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
+++ src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
@@ -192,8 +192,8 @@ public class PoolMap<K, V> implements Map<K, V> {
     for (Map.Entry<K, Pool<V>> poolEntry : pools.entrySet()) {
       final K poolKey = poolEntry.getKey();
       final Pool<V> pool = poolEntry.getValue();
-      for (final V poolValue : pool.values()) {
-        if (pool != null) {
+      if (pool != null) {
+        for (final V poolValue : pool.values()) {
           entries.add(new Map.Entry<K, V>() {
             @Override
             public K getKey() {