Index: dev-support/findbugs-exclude.xml
===================================================================
--- dev-support/findbugs-exclude.xml	(revision 1327958)
+++ dev-support/findbugs-exclude.xml	(working copy)
@@ -48,4 +48,58 @@
+ [54 added <Match> exclusion entries; the XML element content was lost in extraction and is not recoverable from this copy]
Index: dev-support/test-patch.properties
===================================================================
--- dev-support/test-patch.properties	(revision 1327958)
+++ dev-support/test-patch.properties	(working copy)
@@ -19,5 +19,5 @@
 
 # Please update the per-module test-patch.properties if you update this file.
 OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=549
+OK_FINDBUGS_WARNINGS=524
 OK_JAVADOC_WARNINGS=169
Index: src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
===================================================================
--- src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon	(revision 1327958)
+++ src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon	(working copy)
@@ -48,7 +48,6 @@
   serverName = regionServer.getServerName();
   RegionServerMetrics metrics = regionServer.getMetrics();
   List onlineRegions = regionServer.getOnlineRegions();
-  int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000;
   int masterInfoPort = regionServer.getConfiguration().getInt("hbase.master.info.port", 60010);
Index: src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/filter/FilterList.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/filter/FilterList.java	(working copy)
@@ -222,9 +222,18 @@
         case INCLUDE:
           rc = ReturnCode.INCLUDE;
           // must continue here to evaluate all filters
+          break;
         case NEXT_ROW:
+          break;
         case SKIP:
           // continue;
+          break;
+        case NEXT_COL:
+          break;
+        case SEEK_NEXT_USING_HINT:
+          break;
+        default:
+          throw new IllegalStateException("Received code is not valid.");
         }
       }
     }
Index: src/main/java/org/apache/hadoop/hbase/HServerLoad.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/HServerLoad.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/HServerLoad.java	(working copy)
@@ -386,7 +386,7 @@
    */
   private void readFields92(DataInput in) throws IOException {
     // in 0.92, the version was actually written twice, consume the second copy
-    int version = in.readByte();
+    in.readByte();
     int namelen = in.readInt();
     this.name = new byte[namelen];
     in.readFully(this.name);
@@ -503,7 +503,7 @@
     float compactionProgressPct = Float.NaN;
     if( this.totalCompactingKVs > 0 ) {
       compactionProgressPct = Float.valueOf(
-          this.currentCompactedKVs / this.totalCompactingKVs);
+          (float)this.currentCompactedKVs / this.totalCompactingKVs);
     }
     sb = Strings.appendKeyValue(sb, "compactionProgressPct",
         compactionProgressPct);
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java	(working copy)
@@ -198,10 +198,6 @@
     if (key == null || length <= 0) {
       throw new IOException("Key cannot be null or empty");
     }
-    if (length > HFile.MAXIMUM_KEY_LENGTH) {
-      throw new IOException("Key length " + length + " > "
-          + HFile.MAXIMUM_KEY_LENGTH);
-    }
     if (lastKeyBuffer != null) {
       int keyComp = comparator.compare(lastKeyBuffer, lastKeyOffset,
           lastKeyLength, key, offset, length);
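For context: the compactionProgressPct fixes (in HServerLoad.java above and in CompactionProgress.java further down) address the same FindBugs complaint about integer division. Dividing one long by another is done in integer arithmetic, so the fractional progress is truncated to zero before the result is ever widened to float. A standalone sketch of the pattern with made-up values (not HBase code):

public class IntDivisionDemo {
  public static void main(String[] args) {
    long currentCompactedKVs = 750;
    long totalCompactingKVs = 1000;

    // Both operands are long, so the division truncates toward zero
    // before the result is converted to float: 750 / 1000 == 0.
    float truncated = currentCompactedKVs / totalCompactingKVs;

    // Casting one operand first forces floating-point division.
    float correct = (float) currentCompactedKVs / totalCompactingKVs;

    System.out.println(truncated); // prints 0.0
    System.out.println(correct);   // prints 0.75
  }
}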
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java	(working copy)
@@ -883,7 +883,7 @@
     offset = Bytes.putLong(dest, offset, prevOffset);
     offset = Bytes.putByte(dest, offset, checksumType.getCode());
     offset = Bytes.putInt(dest, offset, bytesPerChecksum);
-    offset = Bytes.putInt(dest, offset, onDiskDataSize);
+    Bytes.putInt(dest, offset, onDiskDataSize);
   }
 
   /**
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java	(working copy)
@@ -671,7 +671,7 @@
     return CACHE_FIXED_OVERHEAD + ClassSize.CONCURRENT_HASHMAP +
       ((long)Math.ceil(maxSize*1.2/blockSize)
           * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
-      (concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
+      ((long)concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
   }
 
   @Override
Index: src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java	(working copy)
@@ -185,7 +185,6 @@
       (VersionedProtocol) Proxy.newProxyInstance(
           protocol.getClassLoader(), new Class[] { protocol },
           new Invoker(protocol, addr, ticket, conf, factory, rpcTimeout));
-    if (proxy instanceof VersionedProtocol) {
     try {
       long serverVersion = ((VersionedProtocol)proxy)
         .getProtocolVersion(protocol.getName(), clientVersion);
@@ -206,7 +205,6 @@
       }
       throw (IOException)t;
     }
-    }
     return proxy;
   }
Index: src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java	(working copy)
@@ -608,6 +608,9 @@
         }
         failoverProcessedRegions.put(encodedRegionName, regionInfo);
         break;
+
+      default:
+        throw new IllegalStateException("Received event is not valid.");
       }
     }
   }
@@ -897,6 +900,9 @@
             new OpenedRegionHandler(master, this, regionState.getRegion(),
               data.getOrigin(), expectedVersion));
         break;
+
+      default:
+        throw new IllegalStateException("Received event is not valid.");
       }
     }
   }
@@ -1988,7 +1994,6 @@
       return;
     } catch (KeeperException ke) {
       LOG.error("Unexpected zk state", ke);
-      ke = e;
     }
   }
   // If we get here, don't understand whats going on -- abort.
@@ -2938,6 +2943,9 @@
           "expire, send RPC again");
         invokeUnassign(regionInfo);
         break;
+
+      default:
+        throw new IllegalStateException("Received event is not valid.");
       }
     }
   }
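The (long) cast added in LruBlockCache above (and the similar one in IncreasingToUpperBoundRegionSplitPolicy.java below) fixes the sibling FindBugs pattern: multiplying two ints is performed in 32-bit arithmetic and can wrap around even when the result is assigned to a long. A standalone sketch with made-up operands (not HBase code):

public class IntOverflowDemo {
  public static void main(String[] args) {
    int concurrency = 100_000;
    int segmentOverhead = 100_000;

    // Both operands are int, so the multiply wraps in 32 bits before
    // the (already overflowed) result is widened to long.
    long wrong = concurrency * segmentOverhead;        // 1410065408

    // Widening one operand first forces 64-bit arithmetic.
    long right = (long) concurrency * segmentOverhead; // 10000000000

    System.out.println(wrong);
    System.out.println(right);
  }
}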
Index: src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java	(working copy)
@@ -50,6 +50,6 @@
    * @return float
    */
   public float getProgressPct() {
-    return currentCompactedKVs / totalCompactingKVs;
+    return (float)currentCompactedKVs / totalCompactingKVs;
   }
 }
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -2267,7 +2267,7 @@
     // Lock row
     Integer lid = getLock(lockId, get.getRow(), true);
-    List result = new ArrayList();
+    List result = null;
     try {
       result = get(get, false);
Index: src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java	(working copy)
@@ -88,7 +88,7 @@
   long getSizeToCheck(final int tableRegionsCount) {
     return tableRegionsCount == 0? getDesiredMaxFileSize():
       Math.min(getDesiredMaxFileSize(),
-        this.flushSize * (tableRegionsCount * tableRegionsCount));
+        this.flushSize * (tableRegionsCount * (long)tableRegionsCount));
   }
 
   /**
Index: src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java	(working copy)
@@ -111,8 +111,7 @@
    * != c
    */
   private void tryRetireChunk(Chunk c) {
-    @SuppressWarnings("unused")
-    boolean weRetiredIt = curChunk.compareAndSet(c, null);
+    curChunk.compareAndSet(c, null);
     // If the CAS succeeds, that means that we won the race
     // to retire the chunk. We could use this opportunity to
     // update metrics on external fragmentation.
Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java	(working copy)
@@ -1438,8 +1438,12 @@
    */
  public boolean passesGeneralBloomFilter(byte[] row, int rowOffset,
      int rowLen, byte[] col, int colOffset, int colLen) {
-    if (generalBloomFilter == null)
+    // Cache Bloom filter as a local variable in case it is set to null by
+    // another thread on an IO error.
+    BloomFilter bloomFilter = this.generalBloomFilter;
+    if (bloomFilter == null) {
       return true;
+    }
 
     byte[] key;
     switch (bloomFilterType) {
@@ -1456,7 +1460,7 @@
         break;
 
       case ROWCOL:
-        key = generalBloomFilter.createBloomKey(row, rowOffset, rowLen, col,
+        key = bloomFilter.createBloomKey(row, rowOffset, rowLen, col,
             colOffset, colLen);
         break;
 
@@ -1464,14 +1468,6 @@
         return true;
     }
 
-    // Cache Bloom filter as a local variable in case it is set to null by
-    // another thread on an IO error.
-    BloomFilter bloomFilter = this.generalBloomFilter;
-
-    if (bloomFilter == null) {
-      return true;
-    }
-
     // Empty file
     if (reader.getTrailer().getEntryCount() == 0)
       return false;
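The StoreFile change above is a read-once pattern for fields that another thread can null out: the original code checked generalBloomFilter for null but then re-read the field for createBloomKey(), so a concurrent IO-error handler clearing it could still cause a NullPointerException. A minimal sketch of the idea with hypothetical names (not the HBase classes):

public class ReadOnceDemo {
  // May be cleared by an error-handling thread, e.g. after an IO failure.
  private volatile StringBuilder filter = new StringBuilder("bloom");

  public boolean passes() {
    // Read the shared field exactly once into a local.
    StringBuilder local = this.filter;
    if (local == null) {
      return true; // filter unavailable; conservatively report a possible hit
    }
    // Safe: 'local' cannot become null even if 'filter' is cleared now.
    return local.length() > 0;
  }

  public void onIoError() {
    this.filter = null;
  }
}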
Index: src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java	(working copy)
@@ -246,7 +246,12 @@
   }
 
   private static int optimalFunctionCount(int maxKeys, long bitSize) {
-    return (int) Math.ceil(Math.log(2) * (bitSize / maxKeys));
+    long i = bitSize / maxKeys;
+    double result = Math.ceil(Math.log(2) * i);
+    if (result > Integer.MAX_VALUE){
+      throw new IllegalArgumentException("result too large for integer value.");
+    }
+    return (int)result;
   }
 
   /** Private constructor used by other constructors. */
@@ -298,7 +303,7 @@
       double errorRate, int hashType, int foldFactor) {
     ByteBloomFilter bbf = new ByteBloomFilter(hashType);
-    bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8, foldFactor);
+    bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor);
     long bitSize = bbf.byteSize * 8;
     bbf.maxKeys = (int) idealMaxKeys(bitSize, errorRate);
     bbf.hashCount = optimalFunctionCount(bbf.maxKeys, bitSize);
Index: src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java	(working copy)
@@ -92,7 +92,7 @@
       int hashType, int maxFold, boolean cacheOnWrite,
       RawComparator comparator) {
     chunkByteSize = ByteBloomFilter.computeFoldableByteSize(
-        chunkByteSizeHint * 8, maxFold);
+        chunkByteSizeHint * 8L, maxFold);
 
     this.errorRate = errorRate;
     this.hashType = hashType;
Index: src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java	(working copy)
@@ -84,7 +84,6 @@
       }
     }
 
-    Configuration conf = HBaseConfiguration.create();
     try {
       Compressor c = algo.getCompressor();
       algo.returnCompressor(c);
Index: src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java	(working copy)
@@ -74,13 +74,9 @@
     while (!recovered) {
       try {
         try {
-          if (fs instanceof DistributedFileSystem) {
-            DistributedFileSystem dfs = (DistributedFileSystem)fs;
-            DistributedFileSystem.class.getMethod("recoverLease",
-              new Class[] {Path.class}).invoke(dfs, p);
-          } else {
-            throw new Exception("Not a DistributedFileSystem");
-          }
+          DistributedFileSystem dfs = (DistributedFileSystem) fs;
+          DistributedFileSystem.class.getMethod("recoverLease", new Class[] { Path.class }).invoke(
+              dfs, p);
         } catch (InvocationTargetException ite) {
           // function was properly called, but threw it's own exception
           throw (IOException) ite.getCause();
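For context on the FSHDFSUtils hunk above: recoverLease() is invoked through reflection so the class still compiles and loads against Hadoop versions that lack the method; the patch only drops the instanceof guard that FindBugs flagged as redundant with the cast. A simplified sketch of the reflection pattern, using generic names rather than the HDFS API:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCallDemo {
  /**
   * Invokes target.methodName(arg) if the method exists at runtime,
   * unwrapping any exception the target method itself throws.
   */
  static void callIfPresent(Object target, String methodName,
      Class<?> argType, Object arg) throws Exception {
    try {
      Method m = target.getClass().getMethod(methodName, argType);
      m.invoke(target, arg);
    } catch (NoSuchMethodException e) {
      // Running against a library version without this method.
      throw new UnsupportedOperationException(methodName + " not available", e);
    } catch (InvocationTargetException ite) {
      // The method was called and threw; surface its real exception.
      Throwable cause = ite.getCause();
      if (cause instanceof Exception) {
        throw (Exception) cause;
      }
      throw ite;
    }
  }

  public static void main(String[] args) throws Exception {
    StringBuilder sb = new StringBuilder();
    callIfPresent(sb, "append", String.class, "hello");
    System.out.println(sb); // prints "hello"
  }
}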
Index: src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/PoolMap.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/util/PoolMap.java	(working copy)
@@ -316,7 +316,7 @@
 
     @Override
     public R put(R resource) {
-      if (size() < maxSize) {
+      if (super.size() < maxSize) {
         add(resource);
       }
       return null;
@@ -355,7 +355,7 @@
 
     @Override
     public R put(R resource) {
-      if (size() < maxSize) {
+      if (super.size() < maxSize) {
         add(resource);
       }
       return null;
@@ -363,10 +363,10 @@
 
     @Override
     public R get() {
-      if (size() < maxSize) {
+      if (super.size() < maxSize) {
         return null;
       }
-      nextResource %= size();
+      nextResource %= super.size();
       R resource = get(nextResource++);
       return resource;
     }
Index: src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java	(working copy)
@@ -632,7 +632,6 @@
     // get table info
     Path hbDir = new Path(table.getConfiguration().get(HConstants.HBASE_DIR));
     Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
-    Path splitFile = new Path(tableDir, "_balancedSplit");
     FileSystem fs = FileSystem.get(table.getConfiguration());
 
     // clear the cache to forcibly refresh region information
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java	(revision 1327958)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java	(working copy)
@@ -374,6 +374,12 @@
       if (this.abortable != null) this.abortable.abort(msg,
           new KeeperException.SessionExpiredException());
       break;
+
+    case ConnectedReadOnly:
+      break;
+
+    default:
+      throw new IllegalStateException("Received event is not valid.");
     }
   }
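The default-throws arms added throughout this patch (FilterList, AssignmentManager, ZooKeeperWatcher) all follow one defensive pattern: every switch over an enum ends in a default case that fails loudly, so a constant added to the enum later cannot fall through silently. A minimal sketch with a hypothetical enum (not the ZooKeeper API):

public class EnumSwitchDemo {
  enum KeeperState { SyncConnected, Disconnected, Expired, ConnectedReadOnly }

  static String describe(KeeperState state) {
    switch (state) {
      case SyncConnected:
        return "connected";
      case Disconnected:
        return "disconnected";
      case Expired:
        return "session expired";
      case ConnectedReadOnly:
        return "read-only";
      default:
        // Unreachable today, but catches future enum additions.
        throw new IllegalStateException("Received event is not valid: " + state);
    }
  }

  public static void main(String[] args) {
    System.out.println(describe(KeeperState.SyncConnected));
  }
}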