diff --git dev-support/test-patch.properties dev-support/test-patch.properties
index 2209d27..f922d79 100644
--- dev-support/test-patch.properties
+++ dev-support/test-patch.properties
@@ -19,5 +19,5 @@
 MAVEN_OPTS="-Xmx3g"
 # Please update the per-module test-patch.properties if you update this file.
 OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=601
+OK_FINDBUGS_WARNINGS=577
 OK_JAVADOC_WARNINGS=169
diff --git src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index ae76204..5041cef 100644
--- src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -50,6 +50,10 @@ org.apache.hadoop.hbase.HBaseConfiguration;
   } catch (IOException e) {
     e.printStackTrace();
   }
+  if (serverInfo == null) {
+    throw new NullPointerException("Could not find information for region " +
+        "server: " + regionServer.toString());
+  }
   RegionServerMetrics metrics = regionServer.getMetrics();
   List<HRegionInfo> onlineRegions = regionServer.getOnlineRegions();
   int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000;
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 37bcaba..ecb6361 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -587,9 +587,7 @@ public class HFile {
       hfs = (HFileSystem)fs;
       // open a stream to read data without checksum verification in
       // the filesystem
-      if (hfs != null) {
-        fsdisNoFsChecksum = hfs.getNoChecksumFs().open(path);
-      }
+      fsdisNoFsChecksum = hfs.getNoChecksumFs().open(path);
     }
     return pickReaderVersion(path, fsdis, fsdisNoFsChecksum,
         fs.getFileStatus(path).getLen(), closeIStream, cacheConf,
diff --git src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index 32d66fb..fb87cef 100644
--- src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -112,41 +112,41 @@ public class CellCounter {
         if (values != null) {
           context.getCounter(Counters.ROWS).increment(1);
           context.write(new Text("Total ROWS"), new IntWritable(1));
-        }
-
-        for (KeyValue value : values.list()) {
-          currentRowKey = Bytes.toStringBinary(value.getRow());
-          String thisRowFamilyName = Bytes.toStringBinary(value.getFamily());
-          if (thisRowFamilyName != null &&
-              !thisRowFamilyName.equals(currentFamilyName)) {
-            currentFamilyName = thisRowFamilyName;
-            context.getCounter("CF", thisRowFamilyName).increment(1);
-            context.write(new Text("Total Families Across all Rows"),
-                new IntWritable(1));
-            context.write(new Text(thisRowFamilyName), new IntWritable(1));
-          }
-          String thisRowQualifierName =
-              thisRowFamilyName + separator + Bytes.toStringBinary(value.getQualifier());
-          if (thisRowQualifierName != null &&
-              !thisRowQualifierName.equals(currentQualifierName)) {
-            currentQualifierName = thisRowQualifierName;
-            context.getCounter("CFQL", thisRowQualifierName).increment(1);
-            context.write(new Text("Total Qualifiers across all Rows"),
-                new IntWritable(1));
-            context.write(new Text(thisRowQualifierName), new IntWritable(1));
-            // Intialize versions
-            context.getCounter("QL_VERSIONS", currentRowKey + separator +
-                thisRowQualifierName).increment(1);
-            context.write(new Text(currentRowKey + separator + thisRowQualifierName +
-                "_Versions"), new IntWritable(1));
-
-          } else {
-            // Increment versions
-            currentQualifierName = thisRowQualifierName;
-            context.getCounter("QL_VERSIONS", currentRowKey + separator +
-                thisRowQualifierName).increment(1);
-            context.write(new Text(currentRowKey + separator + thisRowQualifierName + "_Versions"),
-                new IntWritable(1));
+          for (KeyValue value : values.list()) {
+            currentRowKey = Bytes.toStringBinary(value.getRow());
+            String thisRowFamilyName = Bytes.toStringBinary(value.getFamily());
+            if (thisRowFamilyName != null &&
+                !thisRowFamilyName.equals(currentFamilyName)) {
+              currentFamilyName = thisRowFamilyName;
+              context.getCounter("CF", thisRowFamilyName).increment(1);
+              context.write(new Text("Total Families Across all Rows"),
+                  new IntWritable(1));
+              context.write(new Text(thisRowFamilyName), new IntWritable(1));
+            }
+            String thisRowQualifierName =
+                thisRowFamilyName + separator +
+                Bytes.toStringBinary(value.getQualifier());
+            if (thisRowQualifierName != null &&
+                !thisRowQualifierName.equals(currentQualifierName)) {
+              currentQualifierName = thisRowQualifierName;
+              context.getCounter("CFQL", thisRowQualifierName).increment(1);
+              context.write(new Text("Total Qualifiers across all Rows"),
+                  new IntWritable(1));
+              context.write(new Text(thisRowQualifierName), new IntWritable(1));
+              // Intialize versions
+              context.getCounter("QL_VERSIONS", currentRowKey + separator +
+                  thisRowQualifierName).increment(1);
+              context.write(new Text(currentRowKey + separator +
+                  thisRowQualifierName + "_Versions"), new IntWritable(1));
+            } else {
+              // Increment versions
+              currentQualifierName = thisRowQualifierName;
+              context.getCounter("QL_VERSIONS", currentRowKey + separator +
+                  thisRowQualifierName).increment(1);
+              context.write(new Text(currentRowKey + separator +
+                  thisRowQualifierName + "_Versions"),
+                  new IntWritable(1));
+            }
           }
         }
       } catch (InterruptedException e) {
diff --git src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 64def15..8b8a4b4 100644
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -921,7 +921,7 @@ public class AssignmentManager extends ZooKeeperListener {
       return p.getFirst();
     } catch (IOException e) {
       master.abort("Aborting because error occoured while reading "
-        + data.getRegionName() + " from .META.", e);
+        + Bytes.toStringBinary(data.getRegionName()) + " from .META.", e);
       return null;
     }
   }
@@ -1750,7 +1750,6 @@ public class AssignmentManager extends ZooKeeperListener {
   boolean asyncSetOfflineInZooKeeper(final RegionState state,
       final AsyncCallback.StringCallback cb, final Object ctx) {
     if (!state.isClosed() && !state.isOffline()) {
-      new RuntimeException("Unexpected state trying to OFFLINE; " + state);
       this.master.abort("Unexpected state trying to OFFLINE; " + state,
         new IllegalStateException());
       return false;
diff --git src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
index 1676832..4ebd7f6 100644
--- src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
+++ src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
@@ -692,7 +692,7 @@ public class DefaultLoadBalancer implements LoadBalancer {
       }
     } catch (FileNotFoundException fnfe) {
       LOG.debug("FileNotFoundException during getTableDescriptors." +
-          " Current table name = " + tableName , fnfe);
+          " Current table name = " + Bytes.toStringBinary(tableName) , fnfe);
     }
 
     return tableDescriptor;
diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9bd4ace..fcbd14c 100644
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -262,7 +262,7 @@ Server {
     // Creation of a HSA will force a resolve.
     InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
     if (initialIsa.getAddress() == null) {
-      throw new IllegalArgumentException("Failed resolve of " + this.isa);
+      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
     }
     int numHandlers = conf.getInt("hbase.master.handler.count",
       conf.getInt("hbase.regionserver.handler.count", 25));
diff --git src/main/java/org/apache/hadoop/hbase/master/ServerManager.java src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 3515d4a..fcc41c3 100644
--- src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -543,7 +543,7 @@ public class ServerManager {
    */
   private HRegionInterface getServerConnection(final ServerName sn)
   throws IOException {
-    HRegionInterface hri = this.serverConnections.get(sn.toString());
+    HRegionInterface hri = this.serverConnections.get(sn);
     if (hri == null) {
       LOG.debug("New connection to " + sn.toString());
       hri = this.connection.getHRegionConnection(sn.getHostname(), sn.getPort());
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8ae60a3..0c17480 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -635,7 +635,9 @@ public class HRegion implements HeapSize { // , Writable{
     SplitTransaction.cleanupAnySplitDetritus(this);
     FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR));
 
-    this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
+    if (this.htableDescriptor != null) {
+      this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
+    }
 
     this.writestate.flushRequested = false;
     this.writestate.compacting = 0;
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
index 2694897..82f2dcf 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
@@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.common.base.Preconditions;
 /**
@@ -259,7 +260,7 @@ public class MemStoreLAB {
 
     @Override
     public String toString() {
-      return "Allocation(data=" + data +
+      return "Allocation(data=" + Bytes.toStringBinary(data) +
         " with capacity=" + data.length +
         ", off=" + offset + ")";
     }
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
index e3b230e..c7b3557 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java
@@ -170,7 +170,9 @@ public class ShutdownHook {
     }
     Field field = null;
     try {
-      field = cache.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
+      if (cache != null) {
+        field = cache.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
+      }
     } catch (NoSuchFieldException e) {
       // We can get here if the Cache class does not have a clientFinalizer
       // instance: i.e. we're running on straight 0.20 w/o hadoop-4829.
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 0c7b396..a5110d4 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -505,6 +505,10 @@ public class Store extends SchemaConfigured implements HeapSize {
         Bytes.toStringBinary(region.getStartKey()) +
         " last=" + Bytes.toStringBinary(region.getEndKey()));
 
+    if (firstKey == null || lastKey == null) {
+      throw new InvalidHFileException("Hfile has no entries");
+    }
+
     HRegionInfo hri = region.getRegionInfo();
     if (!hri.containsRange(firstKey, lastKey)) {
       throw new WrongRegionException(
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 2e98b39..2f4eeec 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -963,7 +963,7 @@ public class StoreFile extends SchemaConfigured {
         .withPath(fs, path)
         .withBlockSize(blocksize)
         .withCompression(compress)
-        .withDataBlockEncoder(dataBlockEncoder)
+        .withDataBlockEncoder(this.dataBlockEncoder)
        .withComparator(comparator.getRawComparator())
         .withChecksumType(checksumType)
         .withBytesPerChecksum(bytesPerChecksum)
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
index 698bb3d..baf2bbc 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -757,7 +757,7 @@ public class HLog implements Syncable {
     // If too many log files, figure which regions we need to flush.
     // Array is an array of encoded region names.
     byte [][] regions = null;
-    int logCount = this.outputfiles == null? 0: this.outputfiles.size();
+    int logCount = this.outputfiles.size();
     if (logCount > this.maxLogs && logCount > 0) {
       // This is an array of encoded region names.
       regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
@@ -1881,4 +1881,4 @@ public class HLog implements Syncable {
     System.exit(-1);
     }
   }
-}
\ No newline at end of file
+}
diff --git src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 8950c9f..f56a094 100644
--- src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -783,11 +783,6 @@ public class ReplicationSource extends Thread
       return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
     }
 
-    @Override
-    public boolean equals(Object o) {
-      return true;
-    }
-
     /**
      * Split a path to get the start time
      * For example: 10.20.20.171%3A60020.1277499063250
diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 62cf6ac..efb2b84 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -273,7 +273,7 @@ public class FSTableDescriptors implements TableDescriptors {
       Path p = status[i].getPath();
       // Clean up old versions
       if (!fs.delete(p, false)) {
-        LOG.warn("Failed cleanup of " + status);
+        LOG.warn("Failed cleanup of " + p);
       } else {
         LOG.debug("Cleaned up old tableinfo file " + p);
       }
diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index aebe5b0..b9c47fc 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -832,7 +832,7 @@ public abstract class FSUtils {
     public boolean accept(Path p) {
       boolean isValid = false;
       try {
-        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) {
+        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.toString())) {
           isValid = false;
         } else {
           isValid = this.fs.getFileStatus(p).isDir();
diff --git src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index c21377c..352f332 100644
--- src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -231,8 +231,7 @@ public class JVMClusterUtil {
         }
       }
     }
-    // regionServerThreads can never be null because they are initialized when
-    // the class is constructed.
+    if (regionservers != null) {
     for(RegionServerThread t: regionservers) {
       if (t.isAlive()) {
         try {
@@ -243,6 +242,7 @@
         }
       }
     }
+    }
     if (masters != null) {
       for (JVMClusterUtil.MasterThread t : masters) {
         while (t.master.isAlive()) {
diff --git src/main/java/org/apache/hadoop/hbase/util/Merge.java src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 04f15d4..db425f8 100644
--- src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -152,12 +152,20 @@ public class Merge extends Configured implements Tool {
     Get get = new Get(region1);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List<KeyValue> cells1 = rootRegion.get(get, null).list();
-    HRegionInfo info1 = Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue());
+    if (cells1 == null) {
+      throw new NullPointerException("cells1 is null using key " +
+        Bytes.toStringBinary(region1));
+    }
+    HRegionInfo info1 = Writables.getHRegionInfo(cells1.get(0).getValue());
 
     get = new Get(region2);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List<KeyValue> cells2 = rootRegion.get(get, null).list();
-    HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
+    if (cells2 == null) {
+      throw new NullPointerException("cells2 is null using key " +
+        Bytes.toStringBinary(region2));
+    }
+    HRegionInfo info2 = Writables.getHRegionInfo(cells2.get(0).getValue());
 
     HRegion merged = merge(HTableDescriptor.META_TABLEDESC, info1, rootRegion, info2, rootRegion);
     LOG.info("Adding " + merged.getRegionInfo() + " to " + rootRegion.getRegionInfo());
@@ -221,8 +229,11 @@ public class Merge extends Configured implements Tool {
     Get get = new Get(region1);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List<KeyValue> cells1 = metaRegion1.get(get, null).list();
-    HRegionInfo info1 =
-      Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue());
+    if (cells1 == null) {
+      throw new NullPointerException("cells1 is null using key " +
+        Bytes.toStringBinary(region1) + " in " + meta1);
+    }
+    HRegionInfo info1 = Writables.getHRegionInfo(cells1.get(0).getValue());
     if (info1 == null) {
       throw new NullPointerException("info1 is null using key " +
         Bytes.toStringBinary(region1) + " in " + meta1);
@@ -237,7 +248,11 @@ public class Merge extends Configured implements Tool {
     get = new Get(region2);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     List<KeyValue> cells2 = metaRegion2.get(get, null).list();
-    HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue());
+    if (cells2 == null) {
+      throw new NullPointerException("cells2 is null using key " +
+        Bytes.toStringBinary(region2) + " in " + meta2);
+    }
+    HRegionInfo info2 = Writables.getHRegionInfo(cells2.get(0).getValue());
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta2);
     }
diff --git src/main/java/org/apache/hadoop/hbase/util/PoolMap.java src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
index 8e2a856..7caf796 100644
--- src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
+++ src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
@@ -192,8 +192,8 @@ public class PoolMap<K, V> implements Map<K, V> {
     for (Map.Entry<K, Pool<V>> poolEntry : pools.entrySet()) {
       final K poolKey = poolEntry.getKey();
       final Pool<V> pool = poolEntry.getValue();
-      for (final V poolValue : pool.values()) {
-        if (pool != null) {
+      if (pool != null) {
+        for (final V poolValue : pool.values()) {
           entries.add(new Map.Entry<K, V>() {
             @Override
             public K getKey() {