commit 049e47bfc82dc7bba06653864cd066a8d8481669
Author: stack
Date:   Thu Jan 14 22:49:55 2016 -0800

    First half

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
index 0d1c7c4..4c00160 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
@@ -60,7 +60,7 @@ public class JMXListener implements Coprocessor {
    * only 1 JMX instance is allowed, otherwise there is port conflict even if
    * we only load regionserver coprocessor on master
    */
-  private static JMXConnectorServer jmxCS = null;
+  private static JMXConnectorServer JMX_CS = null;
 
   public static JMXServiceURL buildJMXServiceURL(int rmiRegistryPort,
       int rmiConnectorPort) throws IOException {
@@ -137,8 +137,11 @@ public class JMXListener implements Coprocessor {
     try {
       // Start the JMXListener with the connection string
-      jmxCS = JMXConnectorServerFactory.newJMXConnectorServer(serviceUrl, jmxEnv, mbs);
-      jmxCS.start();
+      synchronized(JMXListener.class) {
+        if (JMX_CS != null) throw new RuntimeException("Started by another thread?");
+        JMX_CS = JMXConnectorServerFactory.newJMXConnectorServer(serviceUrl, jmxEnv, mbs);
+        JMX_CS.start();
+      }
       LOG.info("ConnectorServer started!");
     } catch (IOException e) {
       LOG.error("fail to start connector server!", e);
@@ -148,10 +151,10 @@ public class JMXListener implements Coprocessor {
 
   public void stopConnectorServer() throws IOException {
     synchronized(JMXListener.class) {
-      if (jmxCS != null) {
-        jmxCS.stop();
+      if (JMX_CS != null) {
+        JMX_CS.stop();
         LOG.info("ConnectorServer stopped!");
-        jmxCS = null;
+        JMX_CS = null;
       }
     }
   }
@@ -186,7 +189,7 @@ public class JMXListener implements Coprocessor {
     }
     synchronized(JMXListener.class) {
-      if (jmxCS != null) {
+      if (JMX_CS != null) {
         LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort);
       } else {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
index 8c1d901..cccbae0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SizeCachedKeyValue.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.util.Bytes;
  * See https://issues.apache.org/jira/browse/HBASE-13448
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS")
 public class SizeCachedKeyValue extends KeyValue {
-
   private static final int HEAP_SIZE_OVERHEAD = Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT;
 
   private short rowLen;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 8b2772b..81ed1ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -175,7 +175,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
       return;
     }
     Task task = findOrCreateOrphanTask(path);
-    if (task.isOrphan() && (task.incarnation == 0)) {
+    if (task.isOrphan() && (task.incarnation.get() == 0)) {
       LOG.info("resubmitting unassigned orphan task " + path);
       // ignore failure to resubmit. The timeout-monitor will handle it later
       // albeit in a more crude fashion
@@ -228,7 +228,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
       version = -1;
     }
     LOG.info("resubmitting task " + path);
-    task.incarnation++;
+    task.incarnation.incrementAndGet();
     boolean result = resubmit(this.details.getServerName(), path, version);
     if (!result) {
       task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index d16970d..5128662 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -54,6 +54,8 @@ import org.apache.hadoop.hbase.util.Pair;
  * it fallbacks to the archived path.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS",
+    justification="To be fixed but warning suppressed for now")
 public class HFileLink extends FileLink {
   private static final Log LOG = LogFactory.getLog(HFileLink.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 669ad92..1e1835f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -488,6 +488,8 @@ public class HFile {
    * @return an appropriate instance of HFileReader
    * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH",
+      justification="Intentional")
   private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
       long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
     FixedFileTrailer trailer = null;
@@ -498,7 +500,7 @@ public class HFile {
     switch (trailer.getMajorVersion()) {
       case 2:
         LOG.debug("Opening HFile v2 with v3 reader");
-        // Fall through.
+        // Fall through. FindBugs: SF_SWITCH_FALLTHROUGH
       case 3 :
         return new HFileReaderImpl(path, trailer, fsdis, size, cacheConf, hfs, conf);
       default:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index fcf7b5b..7b3baef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import java.security.Key;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -473,7 +474,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     private int currMemstoreTSLen;
     private long currMemstoreTS;
     // Updated but never read?
-    protected volatile int blockFetches;
+    protected AtomicInteger blockFetches = new AtomicInteger(0);
     protected final HFile.Reader reader;
     private int currTagsLen;
     // buffer backed keyonlyKV
@@ -885,8 +886,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       HFileBlock block = this.curBlock;
       do {
-        if (block.getOffset() >= lastDataBlockOffset)
+        if (block.getOffset() >= lastDataBlockOffset) {
           return null;
+        }
 
         if (block.getOffset() < 0) {
           throw new IOException("Invalid block file offset: " + block);
@@ -898,7 +900,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
               + block.getOnDiskSizeWithHeader(),
             block.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
             isCompaction, true, null, getEffectiveDataBlockEncoding());
-        if (block != null && !block.getBlockType().isData()) {
+        if (block != null && !block.getBlockType().isData()) { // Findbugs: NP_NULL_ON_SOME_PATH
           // Whatever block we read we will be returning it unless
           // it is a datablock. Just in case the blocks are non data blocks
           reader.returnBlock(block);
@@ -1228,7 +1230,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       updateCurrBlockRef(newBlock);
       blockBuffer = newBlock.getBufferWithoutHeader();
       readKeyValueLen();
-      blockFetches++;
+      blockFetches.incrementAndGet();
 
       // Reset the next indexed key
       this.nextIndexedKey = null;
@@ -1667,7 +1669,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       updateCurrBlockRef(newBlock);
       ByteBuff encodedBuffer = getEncodedBuffer(newBlock);
       seeker.setCurrentBuffer(encodedBuffer);
-      blockFetches++;
+      blockFetches.incrementAndGet();
 
       // Reset the next indexed key
       this.nextIndexedKey = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index 66c7f1d..186d86b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -354,7 +354,7 @@ public class HFileWriterImpl implements HFile.Writer {
     // (table,startrow,hash) so can't be treated as plain byte arrays. Just skip
     // out without
     // trying to do this optimization.
-    if (comparator != null && comparator instanceof MetaCellComparator) {
+    if (comparator instanceof MetaCellComparator) {
       return right;
     }
     int diff = comparator.compareRows(left, right);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index d32fca7..778dcba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1280,15 +1280,13 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private boolean useWrap = false;
     // Fake 'call' for failed authorization response
     private static final int AUTHORIZATION_FAILED_CALLID = -1;
-    private final Call authFailedCall =
-      new Call(AUTHORIZATION_FAILED_CALLID, null, null, null, null, null, this, null, 0, null,
-        null);
+    private final Call authFailedCall;
     private ByteArrayOutputStream authFailedResponse =
       new ByteArrayOutputStream();
     // Fake 'call' for SASL context setup
     private static final int SASL_CALLID = -33;
-    private final Call saslCall =
-      new Call(SASL_CALLID, this.service, null, null, null, null, this, null, 0, null, null);
+    private final Call saslCall;
+
     // was authentication allowed with a fallback to simple auth
     private boolean authenticatedWithFallback;
@@ -1320,6 +1318,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             socketSendBufferSize);
         }
       }
+      this.saslCall =
+        new Call(SASL_CALLID, this.service, null, null, null, null, this, null, 0, null, null);
+      this.authFailedCall =
+        new Call(AUTHORIZATION_FAILED_CALLID, null, null, null, null, null, this, null, 0, null,
+          null);
     }
 
     @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index 20ae4a6..267e094 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -135,17 +135,18 @@ public class HashTable extends Configured implements Tool {
         p.setProperty("endTimestamp", Long.toString(endTime));
       }
 
-      FSDataOutputStream out = fs.create(path);
-      p.store(new OutputStreamWriter(out, Charsets.UTF_8), null);
-      out.close();
+      try (FSDataOutputStream out = fs.create(path)) {
+        p.store(new OutputStreamWriter(out, Charsets.UTF_8), null);
+      }
     }
 
     void readPropertiesFile(FileSystem fs, Path path) throws IOException {
-      FSDataInputStream in = fs.open(path);
       Properties p = new Properties();
-      p.load(new InputStreamReader(in, Charsets.UTF_8));
-      in.close();
-
+      try (FSDataInputStream in = fs.open(path)) {
+        try (InputStreamReader isr = new InputStreamReader(in, Charsets.UTF_8)) {
+          p.load(isr);
+        }
+      }
       tableName = p.getProperty("table");
       families = p.getProperty("columnFamilies");
       batchSize = Long.parseLong(p.getProperty("targetBatchSize"));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index d51d79a..a0bf950 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -249,6 +249,8 @@ public class Import extends Configured implements Tool {
   /**
    * A mapper that just writes out KeyValues.
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
+      justification="Writables are going away and this has been this way forever")
   public static class KeyValueImporter extends TableMapper<ImmutableBytesWritable, KeyValue> {
     private Map<byte[], byte[]> cfRenameMap;
     private Filter filter;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index fc6aec9..209fc97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -658,7 +658,7 @@ public class ImportTsv extends Configured implements Tool {
       "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC +
       " designates that this column should be\n" +
       "used as timestamp for each record. Unlike " + TsvParser.ROWKEY_COLUMN_SPEC + ", " +
-      TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional.\n" +
+      TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + "\n" +
       "You must specify at most one column as timestamp key for each imported record.\n" +
       "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" +
       "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
index ff690c8..6f0075a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
@@ -110,9 +110,7 @@ public abstract class MultiTableInputFormatBase extends
       @Override
       public void close() throws IOException {
         trr.close();
-        if (connection != null) {
-          connection.close();
-        }
+        connection.close();
       }
 
       @Override
@@ -145,9 +143,7 @@ public abstract class MultiTableInputFormatBase extends
         // If there is an exception make sure that all
         // resources are closed and released.
         trr.close();
-        if (connection != null) {
-          connection.close();
-        }
+        connection.close();
         throw ioe;
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 8d5318e..1cdb7f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -643,7 +643,7 @@ public class SplitLogManager {
     public volatile ServerName cur_worker_name;
     public volatile TaskBatch batch;
     public volatile TerminationStatus status;
-    public volatile int incarnation;
+    public volatile AtomicInteger incarnation = new AtomicInteger(0);
     public final AtomicInteger unforcedResubmits = new AtomicInteger();
     public volatile boolean resubmitThresholdReached;
@@ -655,7 +655,6 @@ public class SplitLogManager {
     }
 
     public Task() {
-      incarnation = 0;
       last_version = -1;
       status = IN_PROGRESS;
       setUnassigned();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 2b9d101..ff9e91b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -623,6 +623,8 @@ public class StoreFile {
     return false;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG",
+      justification="Will not overflow")
   public static class WriterBuilder {
     private final Configuration conf;
     private final CacheConfig cacheConf;
@@ -635,7 +637,6 @@ public class StoreFile {
     private Path filePath;
     private InetSocketAddress[] favoredNodes;
    private HFileContext fileContext;
-    private boolean shouldDropCacheBehind = false;
 
     public WriterBuilder(Configuration conf, CacheConfig cacheConf,
         FileSystem fs) {
@@ -703,8 +704,8 @@ public class StoreFile {
       return this;
     }
 
-    public WriterBuilder withShouldDropCacheBehind(boolean shouldDropCacheBehind) {
-      this.shouldDropCacheBehind = shouldDropCacheBehind;
+    public WriterBuilder withShouldDropCacheBehind(boolean shouldDropCacheBehind/*NOT USED!!*/) {
+      // TODO: HAS NO EFFECT!!! FIX!!
       return this;
     }
     /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
index 0044634..beadde6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java
@@ -91,6 +91,8 @@ public class TimeRangeTracker implements Writable {
    * If required, update the current TimestampRange to include timestamp
    * @param timestamp the timestamp value to include
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS",
+      justification="Intentional")
   void includeTimestamp(final long timestamp) {
     // Do test outside of synchronization block. Synchronization in here can be problematic
     // when many threads writing one Store -- they can all pile up trying to add in here.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 660ea91..08420c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -332,7 +332,7 @@ public abstract class Compactor {
     throughputController.start(compactionName);
     KeyValueScanner kvs = (scanner instanceof KeyValueScanner)? (KeyValueScanner)scanner : null;
-    int minFilesToCompact = Math.max(2,
+    long minFilesToCompact = Math.max(2L,
         conf.getInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY,
         /* old name */ conf.getInt("hbase.hstore.compactionThreshold", 3)));
     long shippedCallSizeLimit = minFilesToCompact * HConstants.DEFAULT_BLOCKSIZE;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index 27f019a..7f2d2f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -43,18 +43,20 @@ import org.apache.zookeeper.KeeperException.SessionExpiredException;
  * target cluster is an HBase cluster.
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS",
+    justification="Thinks zkw needs to be synchronized access but should be fine as is.")
 public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
   implements Abortable {
 
   private static final Log LOG = LogFactory.getLog(HBaseReplicationEndpoint.class);
 
-  private ZooKeeperWatcher zkw = null;
+  private ZooKeeperWatcher zkw = null; // FindBugs: MT_CORRECTNESS
   private List<ServerName> regionServers = new ArrayList<ServerName>(0);
-  private volatile long lastRegionServerUpdate;
+  private long lastRegionServerUpdate;
 
   protected void disconnect() {
-    if (zkw != null){
+    if (zkw != null) {
       zkw.close();
     }
   }
@@ -181,7 +183,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
    * Set the list of region servers for that peer
    * @param regionServers list of addresses for the region servers
    */
-  public void setRegionServers(List<ServerName> regionServers) {
+  public synchronized void setRegionServers(List<ServerName> regionServers) {
     this.regionServers = regionServers;
     lastRegionServerUpdate = System.currentTimeMillis();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index 30153f8..de87ac3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -115,7 +115,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
    * Skips the entries which has original seqId. Only entries persisted via distributed log replay
    * have their original seq Id fields set.
    */
-  private class SkipReplayedEditsFilter extends BaseWALEntryFilter {
+  private static class SkipReplayedEditsFilter extends BaseWALEntryFilter {
     @Override
     public Entry filter(Entry entry) {
       // if orig seq id is set, skip replaying the entry
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
index 3541ade..06a2298 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
@@ -280,7 +280,6 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
         Configuration conf = env.getConfiguration();
         fs = FileSystem.get(conf);
         for(Pair<byte[], String> el: familyPaths) {
-          Path p = new Path(el.getSecond());
           Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
           if(!fs.exists(stageFamily)) {
             fs.mkdirs(stageFamily);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
index e98f7a5..f7479cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
@@ -338,12 +338,6 @@ public class TableAuthManager implements Closeable {
   private boolean authorize(List<TablePermission> perms, TableName table, byte[] family,
-      Permission.Action action) {
-    return authorize(perms, table, family, null, action);
-  }
-
-  private boolean authorize(List<TablePermission> perms,
-      TableName table, byte[] family, byte[] qualifier, Permission.Action action) {
     if (perms != null) {
       for (TablePermission p : perms) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
index 1f2bec4..72f4598 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java
@@ -124,7 +124,7 @@ public class AuthenticationTokenSecretManager
   }
 
   @Override
-  protected byte[] createPassword(AuthenticationTokenIdentifier identifier) {
+  protected synchronized byte[] createPassword(AuthenticationTokenIdentifier identifier) {
     long now = EnvironmentEdgeManager.currentTime();
     AuthenticationKey secretKey = currentKey;
     identifier.setKeyId(secretKey.getKeyId());
@@ -229,7 +229,7 @@ public class AuthenticationTokenSecretManager
     return true;
   }
 
-  AuthenticationKey getCurrentKey() {
+  synchronized AuthenticationKey getCurrentKey() {
     return currentKey;
   }
@@ -338,8 +338,11 @@ public class AuthenticationTokenSecretManager
         // clear any expired
         removeExpiredKeys();
-
-        if (lastKeyUpdate + keyUpdateInterval < now) {
+        long localLastKeyUpdate;
+        synchronized (this) {
+          localLastKeyUpdate = lastKeyUpdate;
+        }
+        if (localLastKeyUpdate + keyUpdateInterval < now) {
           // roll a new master key
           rollCurrentKey();
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index b025758..347f3da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -274,8 +274,10 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
       // Read the entire labels table and populate the zk
       if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) {
         this.labelsRegion = true;
-        this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors()
+        synchronized (this) {
+          this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors()
             .contains(AccessController.class.getName());
+        }
         // Defer the init of VisibilityLabelService on labels region until it is in recovering state.
         if (!e.getEnvironment().getRegion().isRecovering()) {
           initVisibilityLabelService(e.getEnvironment());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index a574410..ac9b165 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -270,7 +270,7 @@ public class ExportSnapshot extends Configured implements Tool {
       InputStream in = openSourceFile(context, inputInfo);
       int bandwidthMB = context.getConfiguration().getInt(CONF_BANDWIDTH_MB, 100);
       if (Integer.MAX_VALUE != bandwidthMB) {
-        in = new ThrottledInputStream(new BufferedInputStream(in), bandwidthMB * 1024 * 1024);
+        in = new ThrottledInputStream(new BufferedInputStream(in), bandwidthMB * 1024L * 1024L);
       }
 
       try {
@@ -643,7 +643,6 @@ public class ExportSnapshot extends Configured implements Tool {
     @Override
     public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
       Configuration conf = context.getConfiguration();
-      String snapshotName = conf.get(CONF_SNAPSHOT_NAME);
       Path snapshotDir = new Path(conf.get(CONF_SNAPSHOT_DIR));
       FileSystem fs = FileSystem.get(snapshotDir.toUri(), conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
index c5db43e..95803f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
@@ -280,6 +280,8 @@ public final class SnapshotInfo extends Configured implements Tool {
   private SnapshotManifest snapshotManifest;
 
   @Override
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
+      justification="Intentional")
   public int run(String[] args) throws IOException, InterruptedException {
     final Configuration conf = getConf();
     boolean listSnapshots = false;
@@ -317,7 +319,7 @@ public final class SnapshotInfo extends Configured implements Tool {
           printUsageAndExit();
         }
       } catch (Exception e) {
-        printUsageAndExit();
+        printUsageAndExit(); // FindBugs: REC_CATCH_EXCEPTION
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 9dbeed7..c066803 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -3564,9 +3564,8 @@ public class HBaseFsck extends Configured implements Closeable {
       this.metaEntry = metaEntry;
     }
 
-    public int getReplicaId() {
-      if (metaEntry != null) return metaEntry.getReplicaId();
-      return deployedReplicaId;
+    public synchronized int getReplicaId() {
+      return metaEntry != null? metaEntry.getReplicaId(): deployedReplicaId;
     }
 
     public synchronized void addServer(HRegionInfo hri, ServerName server) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
index 7dc6fbf..98ce80d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java
@@ -68,12 +68,17 @@ public class IdReadWriteLock {
   /** For testing */
   @VisibleForTesting
   int purgeAndGetEntryPoolSize() {
-    System.gc();
+    gc();
     Threads.sleep(200);
     lockPool.purge();
     return lockPool.size();
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional")
+  private void gc() {
+    System.gc();
+  }
+
   @VisibleForTesting
   public void waitForWaiters(long id, int numWaiters) throws InterruptedException {
     for (ReentrantReadWriteLock readWriteLock;;) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
index 309bd4a..7c89f11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
@@ -104,11 +104,8 @@ public class MetaUtils {
    * @return HRegion for meta region
    * @throws IOException e
    */
-  public HRegion getMetaRegion() throws IOException {
-    if (this.metaRegion == null) {
-      openMetaRegion();
-    }
-    return this.metaRegion;
+  public synchronized HRegion getMetaRegion() throws IOException {
+    return this.metaRegion == null? openMetaRegion(): this.metaRegion;
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index ea7034a..f661e0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -455,9 +455,11 @@ public class RegionMover extends AbstractHBaseTool {
     }
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DLS_DEAD_LOCAL_STORE",
+      justification="FB is wrong; its size is read")
   private void unloadRegions(Admin admin, String server, ArrayList<String> regionServers,
       boolean ack, List<HRegionInfo> movedRegions) throws Exception {
-    List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();// FindBugs: DLS_DEAD_LOCAL_STORE
     regionsToMove = getRegions(this.conf, server);
     if (regionsToMove.size() == 0) {
       LOG.info("No Regions to move....Quitting now");
@@ -597,7 +599,7 @@ public class RegionMover extends AbstractHBaseTool {
    * Move Regions without Acknowledging.Usefule in case of RS shutdown as we might want to shut the
    * RS down anyways and not abort on a stuck region. Improves movement performance
    */
-  private class MoveWithoutAck implements Callable<Boolean> {
+  private static class MoveWithoutAck implements Callable<Boolean> {
     private Admin admin;
     private HRegionInfo region;
     private String targetServer;
@@ -764,7 +766,7 @@ public class RegionMover extends AbstractHBaseTool {
     try {
       br = new BufferedReader(new FileReader(f));
       while ((line = br.readLine()) != null) {
-        line.trim();
+        line = line.trim();
        if (!line.equals("")) {
          excludeServers.add(line);
        }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
index 75d77ed..4f1760f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
@@ -185,7 +185,7 @@ public class SortedList<E> implements List<E>, RandomAccess {
   }
 
   @Override
-  public E get(int index) {
+  public synchronized E get(int index) {
     return list.get(index);
   }