Index: dev-support/findbugs-exclude.xml
===================================================================
--- dev-support/findbugs-exclude.xml (revision 1325210)
+++ dev-support/findbugs-exclude.xml (working copy)
@@ -48,4 +48,160 @@
+  <!-- 156 added FindBugs <Match> exclusion entries; the XML bodies were lost in extraction -->
Index: dev-support/test-patch.properties
===================================================================
--- dev-support/test-patch.properties (revision 1325210)
+++ dev-support/test-patch.properties (working copy)
@@ -19,5 +19,5 @@
# Please update the per-module test-patch.properties if you update this file.
OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=549
+OK_FINDBUGS_WARNINGS=537
OK_JAVADOC_WARNINGS=169
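The FindBugs budget drops by twelve (549 to 537), presumably reflecting the exclusions added above together with the source-level fixes that follow.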
Index: src/main/java/org/apache/hadoop/hbase/HServerLoad.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/HServerLoad.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/HServerLoad.java (working copy)
@@ -469,7 +469,7 @@
float compactionProgressPct = Float.NaN;
if( this.totalCompactingKVs > 0 ) {
compactionProgressPct = Float.valueOf(
- this.currentCompactedKVs / this.totalCompactingKVs);
+ (float)this.currentCompactedKVs / this.totalCompactingKVs);
}
sb = Strings.appendKeyValue(sb, "compactionProgressPct",
compactionProgressPct);
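Both this hunk and the CompactionProgress hunk further down fix the same FindBugs integer-division warning (ICAST_IDIV_CAST_TO_DOUBLE): dividing one integral counter by another truncates before the quotient is widened to float, so the percentage is almost always 0.0. A minimal sketch with hypothetical values:

    long currentCompactedKVs = 750;
    long totalCompactingKVs = 1000;

    // Broken: long / long truncates to 0 before the widening conversion.
    float wrong = currentCompactedKVs / totalCompactingKVs;          // 0.0f

    // Fixed: casting one operand performs the division in float.
    float right = (float) currentCompactedKVs / totalCompactingKVs;  // 0.75f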
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java (working copy)
@@ -198,10 +198,6 @@
if (key == null || length <= 0) {
throw new IOException("Key cannot be null or empty");
}
- if (length > HFile.MAXIMUM_KEY_LENGTH) {
- throw new IOException("Key length " + length + " > "
- + HFile.MAXIMUM_KEY_LENGTH);
- }
if (lastKeyBuffer != null) {
int keyComp = comparator.compare(lastKeyBuffer, lastKeyOffset,
lastKeyLength, key, offset, length);
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (working copy)
@@ -883,7 +883,7 @@
offset = Bytes.putLong(dest, offset, prevOffset);
offset = Bytes.putByte(dest, offset, checksumType.getCode());
offset = Bytes.putInt(dest, offset, bytesPerChecksum);
- offset = Bytes.putInt(dest, offset, onDiskDataSize);
+ Bytes.putInt(dest, offset, onDiskDataSize);
}
/**
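This hunk silences a FindBugs dead-store warning (DLS_DEAD_LOCAL_STORE): the offset returned by the final putInt is never read again, so storing it back into the local is useless. The shape of the pattern, with hypothetical writeLong/writeInt helpers that return the updated offset:

    // Before: the last store to 'offset' is dead.
    offset = writeLong(buf, offset, prevOffset);
    offset = writeInt(buf, offset, onDiskDataSize);

    // After: the return value of the final write is simply discarded.
    offset = writeLong(buf, offset, prevOffset);
    writeInt(buf, offset, onDiskDataSize);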
Index: src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (working copy)
@@ -1985,7 +1985,6 @@
return;
} catch (KeeperException ke) {
LOG.error("Unexpected zk state", ke);
- ke = e;
}
}
// If we get here, we don't understand what's going on -- abort.
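The deleted ke = e; was another dead store: the caught exception was reassigned and then went out of scope without ever being read. The shape of the warning, with hypothetical surrounding code:

    try {
        deleteNodeInZk(path);                  // hypothetical helper throwing KeeperException
    } catch (KeeperException ke) {
        LOG.error("Unexpected zk state", ke);
        // ke = otherException;  // dead store: 'ke' is never read before the block ends
    }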
Index: src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java (working copy)
@@ -50,6 +50,6 @@
* @return float
*/
public float getProgressPct() {
- return currentCompactedKVs / totalCompactingKVs;
+ return (float)currentCompactedKVs / totalCompactingKVs;
}
}
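A hypothetical JUnit check, not part of this patch, that would have caught the truncation; it assumes CompactionProgress exposes a long-argument constructor and a public currentCompactedKVs field:

    CompactionProgress progress = new CompactionProgress(1000L);
    progress.currentCompactedKVs = 250;
    // Before the (float) cast this returned 0.0f; after it, 0.25f.
    assertEquals(0.25f, progress.getProgressPct(), 0.0001f);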
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -2339,7 +2339,7 @@
// Lock row
Integer lid = getLock(lockId, get.getRow(), true);
- List result = new ArrayList();
+ List result = null;
try {
result = get(get, false);
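Same dead-store family (DLS_DEAD_LOCAL_STORE): the ArrayList was allocated and then unconditionally overwritten by the get() call inside the try block, so the allocation was pure waste. Sketch of the before and after shapes (doGet() is a hypothetical stand-in):

    // Before: the fresh ArrayList is never read.
    List result = new ArrayList();
    result = doGet();

    // After: initializing to null keeps the local usable past the try block
    // without the throwaway object.
    List result = null;
    result = doGet();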
Index: src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java (working copy)
@@ -88,7 +88,7 @@
long getSizeToCheck(final int tableRegionsCount) {
return tableRegionsCount == 0? getDesiredMaxFileSize():
Math.min(getDesiredMaxFileSize(),
- this.flushSize * (tableRegionsCount * tableRegionsCount));
+ this.flushSize * (tableRegionsCount * (long)tableRegionsCount));
}
/**
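This hunk fixes a 32-bit multiplication overflow (FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG): tableRegionsCount * tableRegionsCount is evaluated in int arithmetic, so a large region count wraps around before the product reaches the long multiply with flushSize. A minimal sketch:

    int tableRegionsCount = 50000;
    long flushSize = 134217728L;  // 128 MB

    // Broken: 50000 * 50000 = 2500000000 overflows int (max 2147483647)
    // and wraps to -1794967296 before the long multiply.
    long wrong = flushSize * (tableRegionsCount * tableRegionsCount);

    // Fixed: widening one operand makes the square a 64-bit product.
    long right = flushSize * (tableRegionsCount * (long) tableRegionsCount);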
Index: src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (working copy)
@@ -298,7 +298,7 @@
double errorRate, int hashType, int foldFactor) {
ByteBloomFilter bbf = new ByteBloomFilter(hashType);
- bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8, foldFactor);
+ bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor);
long bitSize = bbf.byteSize * 8;
bbf.maxKeys = (int) idealMaxKeys(bitSize, errorRate);
bbf.hashCount = optimalFunctionCount(bbf.maxKeys, bitSize);
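Same overflow class, fixed here and again in the CompoundBloomFilterWriter hunk below: once the int byte-size hint exceeds Integer.MAX_VALUE / 8 (256 MiB), multiplying it by the int literal 8 wraps negative, while the 8L literal promotes the arithmetic to long:

    int byteSizeHint = 314572800;      // 300 MB hint
    long bits = byteSizeHint * 8;      // wraps to -1778384896
    long bitsOk = byteSizeHint * 8L;   // 2516582400, as intended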
Index: src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java (working copy)
@@ -92,7 +92,7 @@
int hashType, int maxFold, boolean cacheOnWrite,
RawComparator comparator) {
chunkByteSize = ByteBloomFilter.computeFoldableByteSize(
- chunkByteSizeHint * 8, maxFold);
+ chunkByteSizeHint * 8L, maxFold);
this.errorRate = errorRate;
this.hashType = hashType;
Index: src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java (working copy)
@@ -84,7 +84,6 @@
}
}
- Configuration conf = HBaseConfiguration.create();
try {
Compressor c = algo.getCompressor();
algo.returnCompressor(c);
Index: src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (working copy)
@@ -74,13 +74,9 @@
while (!recovered) {
try {
try {
- if (fs instanceof DistributedFileSystem) {
- DistributedFileSystem dfs = (DistributedFileSystem)fs;
- DistributedFileSystem.class.getMethod("recoverLease",
- new Class[] {Path.class}).invoke(dfs, p);
- } else {
- throw new Exception("Not a DistributedFileSystem");
- }
+ DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem.class.getMethod("recoverLease", new Class[] { Path.class }).invoke(
+ dfs, p);
} catch (InvocationTargetException ite) {
// function was properly called, but threw its own exception
throw (IOException) ite.getCause();
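The simplification drops the instanceof guard, so a non-HDFS FileSystem now fails fast with a ClassCastException instead of the old hand-rolled generic Exception. recoverLease is still looked up by name, presumably so the code compiles against Hadoop client jars that do not declare it. A hedged sketch of the pattern, assuming fs and p are in scope and the usual org.apache.hadoop.hdfs imports:

    DistributedFileSystem dfs = (DistributedFileSystem) fs;  // CCE if not HDFS
    try {
      DistributedFileSystem.class
          .getMethod("recoverLease", new Class[] { Path.class })
          .invoke(dfs, p);
    } catch (InvocationTargetException ite) {
      // The method was found and ran, but threw its own exception: unwrap it.
      throw (IOException) ite.getCause();
    } catch (Exception e) {
      // Reflection plumbing failed (NoSuchMethodException, IllegalAccessException, ...).
      throw new IOException("Failed to invoke recoverLease", e);
    }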
Index: src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (revision 1325210)
+++ src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (working copy)
@@ -632,7 +632,6 @@
// get table info
Path hbDir = new Path(table.getConfiguration().get(HConstants.HBASE_DIR));
Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
- Path splitFile = new Path(tableDir, "_balancedSplit");
FileSystem fs = FileSystem.get(table.getConfiguration());
// clear the cache to forcibly refresh region information