From 70ce973da1d7a28f94b969ce5bf8fc2ba4b1632d Mon Sep 17 00:00:00 2001
From: Peter Somogyi
Date: Mon, 22 Jan 2018 15:32:06 +0100
Subject: [PATCH] HBASE-19811 Fix findbugs and error-prone warnings in
 hbase-server (branch-2)

---
 .../hbase/SslRMIServerSocketFactorySecure.java | 1 +
 .../hadoop/hbase/client/locking/EntityLock.java | 1 +
 .../hadoop/hbase/conf/ConfigurationManager.java | 2 +-
 .../coordination/ZkSplitLogWorkerCoordination.java | 2 +-
 .../coprocessor/BaseRowProcessorEndpoint.java | 8 +-
 .../hadoop/hbase/coprocessor/CoprocessorHost.java | 1 +
 .../hbase/coprocessor/ObserverContextImpl.java | 3 +
 .../hbase/coprocessor/ReadOnlyConfiguration.java | 14 +-
 .../apache/hadoop/hbase/filter/FilterWrapper.java | 1 +
 .../org/apache/hadoop/hbase/fs/HFileSystem.java | 83 ++--
 .../hadoop/hbase/io/FSDataInputStreamWrapper.java | 11 +-
 .../hadoop/hbase/io/HalfStoreFileReader.java | 8 +
 .../java/org/apache/hadoop/hbase/io/Reference.java | 1 +
 .../io/asyncfs/FanOutOneBlockAsyncDFSOutput.java | 1 +
 .../apache/hadoop/hbase/io/hfile/BlockCache.java | 1 +
 .../hadoop/hbase/io/hfile/CompoundBloomFilter.java | 1 +
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java | 10 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java | 68 ++--
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 2 +-
 .../apache/hadoop/hbase/io/hfile/HFileScanner.java | 1 +
 .../hadoop/hbase/io/hfile/LruBlockCache.java | 4 +
 .../hadoop/hbase/io/hfile/LruCachedBlock.java | 1 +
 .../hadoop/hbase/io/hfile/LruCachedBlockQueue.java | 1 +
 .../hbase/io/hfile/bucket/BucketAllocator.java | 1 +
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java | 18 +-
 .../hbase/io/hfile/bucket/CachedEntryQueue.java | 1 +
 .../org/apache/hadoop/hbase/ipc/BufferChain.java | 2 +-
 .../ipc/FastPathBalancedQueueRpcExecutor.java | 1 +
 .../hadoop/hbase/ipc/ServerRpcConnection.java | 5 +-
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java | 1 +
 .../hbase/master/ClusterStatusPublisher.java | 9 +-
 .../org/apache/hadoop/hbase/master/DeadServer.java | 1 +
 .../hadoop/hbase/master/HMasterCommandLine.java | 3 +-
 .../apache/hadoop/hbase/master/LoadBalancer.java | 1 +
 .../MasterAnnotationReadingPriorityFunction.java | 1 +
 .../hadoop/hbase/master/MasterCoprocessorHost.java | 1 +
 .../hadoop/hbase/master/MobCompactionChore.java | 2 +-
 .../hadoop/hbase/master/RegionServerTracker.java | 2 +-
 .../hadoop/hbase/master/SplitLogManager.java | 2 +-
 .../assignment/MergeTableRegionsProcedure.java | 2 +-
 .../hbase/master/assignment/RegionStates.java | 7 +-
 .../master/balancer/FavoredStochasticBalancer.java | 5 +-
 .../master/balancer/RegionLocationFinder.java | 27 +-
 .../hbase/master/balancer/SimpleLoadBalancer.java | 1 +
 .../master/balancer/StochasticLoadBalancer.java | 6 +-
 .../hadoop/hbase/master/cleaner/CleanerChore.java | 4 +-
 .../hadoop/hbase/master/cleaner/HFileCleaner.java | 2 +-
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 2 +-
 .../hadoop/hbase/master/locking/LockProcedure.java | 2 +-
 .../master/procedure/ProcedurePrepareLatch.java | 4 +
 .../master/procedure/RSProcedureDispatcher.java | 9 +
 .../master/snapshot/SnapshotHFileCleaner.java | 2 +
 .../org/apache/hadoop/hbase/mob/CachedMobFile.java | 1 +
 .../hadoop/hbase/mob/ExpiredMobFileCleaner.java | 1 +
 .../PartitionedMobCompactionRequest.java | 1 +
 .../hbase/monitoring/MonitoredRPCHandlerImpl.java | 12 +
 .../hadoop/hbase/monitoring/MonitoredTaskImpl.java | 1 +
 .../hadoop/hbase/monitoring/TaskMonitor.java | 2 +-
 .../apache/hadoop/hbase/procedure/Procedure.java | 2 +-
 .../hbase/procedure/ProcedureManagerHost.java | 6 +-
 .../hadoop/hbase/procedure/Subprocedure.java | 1 +
 .../hbase/procedure/ZKProcedureCoordinator.java | 12 +-
 .../hbase/procedure/ZKProcedureMemberRpcs.java | 1 +
 .../hadoop/hbase/procedure/ZKProcedureUtil.java | 1 +
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 4 +-
 .../hadoop/hbase/quotas/QuotaObserverChore.java | 3 +-
 .../apache/hadoop/hbase/quotas/RateLimiter.java | 1 +
 .../quotas/RegionServerSpaceQuotaManager.java | 2 +-
 .../quotas/SpaceQuotaSnapshotNotifierFactory.java | 4 +-
 .../hbase/regionserver/AbstractMemStore.java | 2 +-
 .../AdaptiveMemStoreCompactionStrategy.java | 3 +
 .../regionserver/CellChunkImmutableSegment.java | 2 +-
 .../apache/hadoop/hbase/regionserver/CellSet.java | 30 ++
 .../hbase/regionserver/CompactingMemStore.java | 4 +-
 .../regionserver/CompositeImmutableSegment.java | 1 +
 .../hbase/regionserver/DateTieredStoreEngine.java | 1 +
 .../hbase/regionserver/DefaultStoreFlusher.java | 8 +-
 .../hadoop/hbase/regionserver/HMobStore.java | 56 +--
 .../apache/hadoop/hbase/regionserver/HRegion.java | 5 +-
 .../hadoop/hbase/regionserver/HRegionServer.java | 10 +-
 .../regionserver/HRegionServerCommandLine.java | 2 +
 .../apache/hadoop/hbase/regionserver/HStore.java | 84 ++--
 .../IncreasingToUpperBoundRegionSplitPolicy.java | 2 +-
 .../hadoop/hbase/regionserver/InternalScanner.java | 1 +
 .../hadoop/hbase/regionserver/KeyValueHeap.java | 5 +
 .../hadoop/hbase/regionserver/KeyValueScanner.java | 1 +
 .../hbase/regionserver/MemStoreCompactor.java | 4 +-
 .../hadoop/hbase/regionserver/MemStoreFlusher.java | 8 +-
 .../MemStoreMergerSegmentsIterator.java | 1 +
 .../MetricsRegionServerWrapperImpl.java | 4 +-
 .../regionserver/MetricsRegionWrapperImpl.java | 2 +-
 .../MultiVersionConcurrencyControl.java | 9 +-
 .../hadoop/hbase/regionserver/RSRpcServices.java | 2 +-
 .../regionserver/RegionServerCoprocessorHost.java | 1 +
 .../hadoop/hbase/regionserver/ScannerContext.java | 8 +-
 .../hbase/regionserver/ServerNonceManager.java | 2 +-
 .../hbase/regionserver/SteppingSplitPolicy.java | 1 +
 .../hbase/regionserver/StoreFileScanner.java | 6 +
 .../hbase/regionserver/StripeStoreFlusher.java | 8 +-
 .../hbase/regionserver/compactions/Compactor.java | 7 +-
 .../compactions/DateTieredCompactionPolicy.java | 1 +
 .../compactions/RatioBasedCompactionPolicy.java | 1 +
 .../compactions/SortedCompactionPolicy.java | 2 +
 .../querymatcher/ExplicitColumnTracker.java | 4 +
 .../querymatcher/ScanWildcardColumnTracker.java | 2 +
 .../hadoop/hbase/regionserver/wal/AsyncFSWAL.java | 2 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java | 20 +-
 .../hadoop/hbase/regionserver/wal/FSWALEntry.java | 1 +
 .../hbase/regionserver/wal/ProtobufLogReader.java | 6 +-
 .../hbase/replication/BaseReplicationEndpoint.java | 2 +-
 .../DefaultSourceFSConfigurationProvider.java | 2 +-
 .../replication/regionserver/Replication.java | 5 +
 .../replication/regionserver/ReplicationSink.java | 4 +-
 .../regionserver/ReplicationSource.java | 19 +-
 .../regionserver/ReplicationSourceManager.java | 2 +-
 .../regionserver/ReplicationSourceWALReader.java | 3 +-
 .../hbase/security/access/AccessControlFilter.java | 1 +
 .../hbase/security/access/AccessController.java | 4 +-
 .../hadoop/hbase/security/access/AuthResult.java | 2 +
 .../token/AuthenticationTokenSecretManager.java | 1 +
 .../security/visibility/ExpressionParser.java | 4 +-
 .../security/visibility/VisibilityController.java | 4 +-
 .../VisibilityNewVersionBehaivorTracker.java | 2 +-
 .../visibility/expression/LeafExpressionNode.java | 1 +
 .../expression/NonLeafExpressionNode.java | 1 +
 .../security/visibility/expression/Operator.java | 3 +-
 .../apache/hadoop/hbase/snapshot/SnapshotInfo.java | 2 +-
 .../hadoop/hbase/snapshot/SnapshotManifestV1.java | 5 +
 .../hadoop/hbase/snapshot/SnapshotManifestV2.java | 5 +
 .../hadoop/hbase/tool/LoadIncrementalHFiles.java | 2 +-
 .../hbase/util/BoundedPriorityBlockingQueue.java | 14 +
 .../org/apache/hadoop/hbase/util/FSMapRUtils.java | 1 +
 .../apache/hadoop/hbase/util/FSRegionScanner.java | 2 +-
 .../hadoop/hbase/util/FSTableDescriptors.java | 2 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 6 +-
 .../java/org/apache/hadoop/hbase/util/IdLock.java | 1 +
 .../apache/hadoop/hbase/util/RegionSplitter.java | 2 +-
 .../apache/hadoop/hbase/util/RowBloomContext.java | 1 +
 .../hadoop/hbase/util/ShutdownHookManager.java | 6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java | 3 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java | 3 +-
 .../apache/hadoop/hbase/wal/FSHLogProvider.java | 2 +-
 .../hadoop/hbase/wal/RegionGroupingProvider.java | 13 +-
 .../main/java/org/apache/hadoop/hbase/wal/WAL.java | 1 +
 .../org/apache/hadoop/hbase/wal/WALFactory.java | 16 +-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java | 4 +-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java | 6 +-
 .../hadoop/hbase/AcidGuaranteesTestTool.java | 4 +
 .../org/apache/hadoop/hbase/HBaseTestCase.java | 3 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java | 9 +-
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java | 7 +-
 .../apache/hadoop/hbase/MultithreadedTestUtil.java | 2 +
 .../hadoop/hbase/TestHDFSBlocksDistribution.java | 1 +
 .../org/apache/hadoop/hbase/TestIOFencing.java | 11 +-
 .../hbase/TestMetaTableAccessorNoCluster.java | 1 +
 .../apache/hadoop/hbase/TestMetaTableLocator.java | 6 +-
 .../hadoop/hbase/TestMovedRegionsCleaner.java | 1 +
 .../org/apache/hadoop/hbase/TestMultiVersions.java | 2 +
 .../org/apache/hadoop/hbase/TestServerName.java | 5 +-
 .../TestServerSideScanMetricsFromClientSide.java | 8 +-
 .../org/apache/hadoop/hbase/client/TestAdmin1.java | 2 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java | 11 +-
 .../hbase/client/TestAsyncClusterAdminApi2.java | 2 +
 .../client/TestAsyncDecommissionAdminApi.java | 2 +-
 .../hbase/client/TestAsyncProcedureAdminApi.java | 2 +-
 .../hbase/client/TestAsyncRegionAdminApi.java | 25 +-
 .../TestAsyncReplicationAdminApiWithClusters.java | 1 +
 .../hbase/client/TestAsyncSnapshotAdminApi.java | 105 +++--
 .../hbase/client/TestAsyncTableAdminApi.java | 2 +-
 .../hadoop/hbase/client/TestAsyncTableBatch.java | 6 +-
 .../TestAvoidCellReferencesIntoShippedBlocks.java | 15 +-
 .../hadoop/hbase/client/TestClientPushback.java | 8 +-
 .../hbase/client/TestConnectionImplementation.java | 3 +-
 .../apache/hadoop/hbase/client/TestFastFail.java | 3 +-
 .../hadoop/hbase/client/TestFromClientSide.java | 356 ++++++++---------
 .../hadoop/hbase/client/TestFromClientSide3.java | 58 +--
 .../apache/hadoop/hbase/client/TestMetaCache.java | 3 +
 .../hbase/client/TestMultipleTimestamps.java | 4 +-
 .../hadoop/hbase/client/TestReplicasClient.java | 14 +-
 .../client/TestRestoreSnapshotFromClient.java | 2 +-
 .../org/apache/hadoop/hbase/client/TestResult.java | 4 +-
 .../hbase/client/TestServerBusyException.java | 4 +-
 .../hadoop/hbase/client/TestSizeFailures.java | 4 +-
 .../hbase/client/TestSmallReversedScanner.java | 4 +-
 .../hbase/client/TestSnapshotFromClient.java | 2 +-
 .../hadoop/hbase/client/TestSnapshotMetadata.java | 4 +-
 .../hadoop/hbase/client/TestSnapshotWithAcl.java | 2 +-
 .../hbase/client/TestSplitOrMergeStatus.java | 8 +-
 .../hadoop/hbase/client/TestTimestampsFilter.java | 20 +-
 .../hbase/client/TestUpdateConfiguration.java | 17 +-
 .../TestReplicationAdminWithClusters.java | 10 +-
 .../hbase/conf/TestConfigurationManager.java | 1 +
 .../hadoop/hbase/constraint/TestConstraint.java | 10 +-
 .../coprocessor/SampleRegionWALCoprocessor.java | 3 +-
 .../hbase/coprocessor/SimpleRegionObserver.java | 4 +-
 .../coprocessor/TestCoprocessorConfiguration.java | 14 +-
 .../hbase/coprocessor/TestCoprocessorHost.java | 2 +-
 .../TestMasterCoprocessorExceptionWithAbort.java | 2 +-
 .../hbase/coprocessor/TestMasterObserver.java | 1 +
 .../hadoop/hbase/coprocessor/TestWALObserver.java | 1 +
 .../hadoop/hbase/executor/TestExecutorService.java | 2 +-
 .../hadoop/hbase/filter/TestBitComparator.java | 8 +-
 .../org/apache/hadoop/hbase/filter/TestFilter.java | 9 +-
 .../hbase/filter/TestFilterFromRegionSide.java | 1 +
 .../apache/hadoop/hbase/filter/TestFilterList.java | 3 +-
 .../hbase/filter/TestFilterSerialization.java | 4 +-
 .../hbase/filter/TestFuzzyRowFilterEndToEnd.java | 2 +-
 .../hbase/filter/TestInvocationRecordFilter.java | 4 +
 .../hadoop/hbase/filter/TestParseFilter.java | 144 +++----
 .../filter/TestSingleColumnValueExcludeFilter.java | 2 +-
 .../apache/hadoop/hbase/fs/TestBlockReorder.java | 5 +-
 .../org/apache/hadoop/hbase/io/TestHFileLink.java | 2 +-
 .../org/apache/hadoop/hbase/io/TestHeapSize.java | 4 +-
 .../io/encoding/TestBufferedDataBlockEncoder.java | 12 +-
 .../hbase/io/encoding/TestDataBlockEncoders.java | 18 +-
 .../io/encoding/TestLoadAndSwitchEncodeOnDisk.java | 2 +
 .../io/encoding/TestSeekBeforeWithReverseScan.java | 12 +-
 .../hadoop/hbase/io/hfile/CacheTestUtils.java | 9 +-
 .../apache/hadoop/hbase/io/hfile/NanoTimer.java | 1 +
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 2 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java | 2 +-
 .../hadoop/hbase/io/hfile/TestHFileEncryption.java | 2 +-
 .../hadoop/hbase/io/hfile/TestLruBlockCache.java | 4 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java | 20 +-
 .../org/apache/hadoop/hbase/ipc/TestNettyIPC.java | 2 +-
 .../apache/hadoop/hbase/ipc/TestProtoBufRpc.java | 2 +-
 .../hadoop/hbase/ipc/TestRpcClientLeaks.java | 7 +-
 .../hadoop/hbase/ipc/TestSimpleRpcScheduler.java | 8 +-
 .../hbase/mapreduce/MapreduceTestingShim.java | 4 +
 .../hbase/master/TestAssignmentListener.java | 11 +-
 .../hadoop/hbase/master/TestMasterFailover.java | 4 +-
 .../hadoop/hbase/master/TestMasterNoCluster.java | 1 +
 .../TestMasterOperationsForRegionReplicas.java | 5 +-
 .../hadoop/hbase/master/TestMasterShutdown.java | 1 +
 .../hbase/master/TestMetaShutdownHandler.java | 6 +-
 .../hadoop/hbase/master/TestRegionPlacement.java | 4 +-
 .../hadoop/hbase/master/TestSplitLogManager.java | 12 +-
 .../hadoop/hbase/master/TestTableStateManager.java | 5 +-
 .../master/assignment/MockMasterServices.java | 2 +
 .../assignment/TestSplitTableRegionProcedure.java | 4 +-
 .../hbase/master/balancer/BalancerTestBase.java | 1 +
 .../hbase/master/cleaner/TestLogsCleaner.java | 1 +
 .../cleaner/TestReplicationHFileCleaner.java | 1 +
 .../master/cleaner/TestSnapshotFromMaster.java | 2 +-
 .../hbase/master/locking/TestLockProcedure.java | 13 +-
 ...terProcedureSchedulerPerformanceEvaluation.java | 4 +
 .../procedure/TestModifyNamespaceProcedure.java | 8 +-
 .../master/procedure/TestModifyTableProcedure.java | 25 +-
 .../procedure/TestWALProcedureStoreOnHDFS.java | 2 +-
 .../master/snapshot/TestSnapshotFileCache.java | 1 +
 .../master/snapshot/TestSnapshotHFileCleaner.java | 3 +-
 .../apache/hadoop/hbase/mob/TestCachedMobFile.java | 36 +-
 .../org/apache/hadoop/hbase/mob/TestMobFile.java | 14 +-
 .../apache/hadoop/hbase/mob/TestMobFileCache.java | 15 +-
 .../apache/hadoop/hbase/mob/TestMobFileName.java | 11 +-
 .../hbase/namespace/TestNamespaceAuditor.java | 4 +-
 .../hadoop/hbase/procedure/TestProcedure.java | 6 +-
 .../hbase/procedure/TestProcedureCoordinator.java | 3 +-
 .../hadoop/hbase/procedure/TestZKProcedure.java | 2 +-
 .../hadoop/hbase/quotas/TestQuotaStatusRPCs.java | 2 +-
 .../quotas/TestSuperUserQuotaPermissions.java | 1 +
 .../regionserver/EncodedSeekPerformanceTest.java | 2 +-
 .../hadoop/hbase/regionserver/MockHStoreFile.java | 2 +
 .../hbase/regionserver/StatefulStoreMockMaker.java | 1 +
 .../hbase/regionserver/TestAtomicOperation.java | 6 +-
 .../hbase/regionserver/TestBlocksScanned.java | 1 +
 .../hadoop/hbase/regionserver/TestBulkLoad.java | 6 +-
 .../hadoop/hbase/regionserver/TestCellFlatSet.java | 25 +-
 .../hbase/regionserver/TestCellSkipListSet.java | 1 +
 .../hbase/regionserver/TestCompactingMemStore.java | 5 +-
 .../TestCompactingToCellFlatMapMemStore.java | 4 +-
 .../hadoop/hbase/regionserver/TestCompaction.java | 7 +-
 .../TestCompactionArchiveConcurrentClose.java | 2 +
 .../regionserver/TestCompactionFileNotFound.java | 2 +-
 .../hbase/regionserver/TestDefaultMemStore.java | 4 +-
 .../regionserver/TestGetClosestAtOrBefore.java | 3 +-
 .../hadoop/hbase/regionserver/TestHRegion.java | 79 ++--
 .../regionserver/TestHRegionReplayEvents.java | 8 +-
 .../regionserver/TestHRegionServerBulkLoad.java | 2 +
 .../hadoop/hbase/regionserver/TestHStore.java | 9 +-
 .../hadoop/hbase/regionserver/TestHStoreFile.java | 33 +-
 .../hbase/regionserver/TestKeyValueHeap.java | 3 +-
 .../hbase/regionserver/TestMajorCompaction.java | 9 +-
 .../hadoop/hbase/regionserver/TestMemStoreLAB.java | 4 +-
 .../hbase/regionserver/TestMinorCompaction.java | 3 +-
 .../TestMultiVersionConcurrencyControl.java | 2 +
 .../hadoop/hbase/regionserver/TestPriorityRpc.java | 30 +-
 .../hbase/regionserver/TestRegionReplicas.java | 2 +-
 .../TestRegionReplicasWithModifyTable.java | 12 +-
 .../regionserver/TestRegionServerAccounting.java | 20 +-
 .../regionserver/TestRegionServerNoMaster.java | 12 +-
 .../hadoop/hbase/regionserver/TestScanner.java | 4 +-
 .../regionserver/TestScannerWithBulkload.java | 1 +
 .../hbase/regionserver/TestSplitLogWorker.java | 2 +-
 .../TestSplitTransactionOnCluster.java | 8 +-
 .../hbase/regionserver/TestStoreScanner.java | 6 +
 .../regionserver/TestSyncTimeRangeTracker.java | 7 +-
 .../hadoop/hbase/regionserver/TestWALLockup.java | 2 +
 .../TestWalAndCompactingMemStoreFlush.java | 2 +-
 .../compactions/PerfTestCompactionPolicies.java | 6 +-
 .../regionserver/compactions/TestCompactor.java | 2 +
 .../querymatcher/TestUserScanQueryMatcher.java | 4 +-
 .../hbase/regionserver/wal/AbstractTestFSWAL.java | 2 +-
 .../regionserver/wal/AbstractTestWALReplay.java | 12 +-
 .../regionserver/wal/InstrumentedLogWriter.java | 2 +-
 .../replication/TestNamespaceReplication.java | 4 +-
 .../replication/TestPerTableCFReplication.java | 4 +-
 .../hbase/replication/TestReplicationBase.java | 2 +-
 .../TestReplicationDisableInactivePeer.java | 2 +-
 .../hbase/replication/TestReplicationKillRS.java | 1 +
 .../replication/TestReplicationSmallTests.java | 4 +-
 .../replication/TestReplicationSyncUpTool.java | 4 +-
 .../hbase/replication/TestReplicationWithTags.java | 2 +-
 .../TestRegionReplicaReplicationEndpoint.java | 8 +-
 .../replication/regionserver/TestReplicator.java | 60 ++-
 .../regionserver/TestWALEntryStream.java | 2 +-
 .../hadoop/hbase/security/TestSecureIPC.java | 3 +-
 .../org/apache/hadoop/hbase/security/TestUser.java | 3 +
 .../security/access/TestAccessControlFilter.java | 3 +
 .../security/access/TestAccessController.java | 9 +-
 .../hbase/security/token/TestZKSecretWatcher.java | 2 +
 .../token/TestZKSecretWatcherRefreshKeys.java | 2 +
 .../TestDefaultScanLabelGeneratorStack.java | 4 +
 .../TestEnforcingScanLabelGenerator.java | 4 +
 ...tVisibilityLabelReplicationWithExpAsString.java | 1 +
 .../security/visibility/TestVisibilityLabels.java | 41 +-
 ...tVisibilityLabelsOnNewVersionBehaviorTable.java | 1 +
 ...tVisibilityLabelsOpWithDifferentUsersNoACL.java | 5 +
 .../TestVisibilityLabelsReplication.java | 3 +
 .../visibility/TestVisibilityLabelsWithACL.java | 9 +
 ...estVisibilityLabelsWithCustomVisLabService.java | 2 +
 ...VisibilityLabelsWithDefaultVisLabelService.java | 4 +
 .../TestVisibilityLabelsWithDeletes.java | 426 ++++++++++-----------
 .../TestVisibilityLabelsWithSLGStack.java | 1 +
 .../visibility/TestVisibilityLablesWithGroups.java | 8 +
 .../visibility/TestVisibilityWithCheckAuths.java | 2 +-
 .../visibility/TestWithDisabledAuthorization.java | 8 +-
 .../snapshot/TestFlushSnapshotFromClient.java | 2 +
 .../hadoop/hbase/tool/MapreduceTestingShim.java | 4 +
 .../hbase/tool/TestLoadIncrementalHFiles.java | 2 +-
 .../TestLoadIncrementalHFilesSplitRecovery.java | 2 +-
 .../hadoop/hbase/util/MultiThreadedAction.java | 4 +-
 .../hadoop/hbase/util/MultiThreadedReader.java | 3 +-
 .../util/TestBoundedPriorityBlockingQueue.java | 6 +-
 .../hadoop/hbase/util/TestByteBuffUtils.java | 4 +-
 .../apache/hadoop/hbase/util/TestFSHDFSUtils.java | 3 +-
 .../apache/hadoop/hbase/util/TestFSVisitor.java | 7 +-
 .../hadoop/hbase/util/TestHBaseFsckEncryption.java | 10 +-
 .../apache/hadoop/hbase/util/TestHBaseFsckMOB.java | 22 +-
 .../hadoop/hbase/util/TestJSONMetricUtil.java | 2 +-
 .../hbase/util/TestMiniClusterLoadParallel.java | 1 +
 .../hbase/util/TestRegionSplitCalculator.java | 4 +-
 .../hbase/util/test/LoadTestDataGenerator.java | 7 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 2 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java | 112 +++---
 .../wal/TestWALSplitBoundedLogWriterCreation.java | 1 +
 .../hadoop/hbase/zookeeper/TestZooKeeperACL.java | 52 +--
 357 files changed, 1947 insertions(+), 1485 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java
index 8560ddc1ef..3583afeb4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java
@@ -33,6 +33,7 @@ public class SslRMIServerSocketFactorySecure extends SslRMIServerSocketFactory {
   @Override
   public ServerSocket createServerSocket(int port) throws IOException {
     return new ServerSocket(port) {
+      @Override
       public Socket accept() throws IOException {
         Socket socket = super.accept();
         SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
index 3fea1a2530..b956e33700 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
@@ -227,6 +227,7 @@ public class EntityLock {
       return this;
     }

+    @Override
    public void run() {
      final LockHeartbeatRequest lockHeartbeatRequest =
          LockHeartbeatRequest.newBuilder().setProcId(procId).build();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
index 2bbb90bbf6..555a5c01a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java
@@ -79,7 +79,7 @@ public class ConfigurationManager {
   // notified when the configuration is reloaded from disk. This is a set
   // constructed from a WeakHashMap, whose entries would be removed if the
   // observer classes go out of scope.
-  private Set<ConfigurationObserver> configurationObservers =
+  private final Set<ConfigurationObserver> configurationObservers =
       Collections.newSetFromMap(new WeakHashMap<ConfigurationObserver, Boolean>());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index bcba101b31..2143f80a69 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -74,7 +74,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements

   private TaskExecutor splitTaskExecutor;

-  private AtomicInteger taskReadySeq = new AtomicInteger(0);
+  private final AtomicInteger taskReadySeq = new AtomicInteger(0);
   private volatile String currentTask = null;
   private int currentVersion;
   private volatile boolean shouldStop = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java
index f460ac9325..ef91bf264f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java
@@ -118,7 +118,7 @@ extends RowProcessorService implements RegionCoprocessor {
     Class<?> cls;
     try {
       cls = Class.forName(className);
-      RowProcessor<S,T> ci = (RowProcessor<S,T>) cls.newInstance();
+      RowProcessor<S,T> ci = (RowProcessor<S,T>) cls.getDeclaredConstructor().newInstance();
       if (request.hasRowProcessorInitializerMessageName()) {
         Class<? extends Message> imn = Class.forName(request.getRowProcessorInitializerMessageName())
             .asSubclass(Message.class);
@@ -141,11 +141,7 @@ extends RowProcessorService implements RegionCoprocessor {
         ci.initialize(s);
       }
       return ci;
-    } catch (ClassNotFoundException e) {
-      throw new IOException(e);
-    } catch (InstantiationException e) {
-      throw new IOException(e);
-    } catch (IllegalAccessException e) {
+    } catch (Exception e) {
       throw new IOException(e);
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index 42da86acc9..05ac9f67f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -572,6 +572,7 @@ public abstract class CoprocessorHost
+    @Override
     void callObserver() throws IOException {
       Optional<O> observer = observerGetter.apply(getEnvironment().getInstance());
       if (observer.isPresent()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java
index 7de6f0b444..6ed1ad3314 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContextImpl.java
@@ -48,6 +48,7 @@ public class ObserverContextImpl implements Ob
     this.bypassable = bypassable;
   }

+  @Override
   public E getEnvironment() {
     return env;
   }
@@ -60,6 +61,7 @@ public class ObserverContextImpl implements Ob
     return this.bypassable;
   };

+  @Override
   public void bypass() {
     if (!this.bypassable) {
       throw new UnsupportedOperationException("This method does not support 'bypass'.");
@@ -82,6 +84,7 @@ public class ObserverContextImpl implements Ob
     return false;
   }

+  @Override
   public Optional<User> getCaller() {
     return Optional.ofNullable(caller);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java
index b073adae95..b805c50e15 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.java
@@ -87,7 +87,7 @@ class ReadOnlyConfiguration extends Configuration {
   }

   @Override
-  public void reloadConfiguration() {
+  public synchronized void reloadConfiguration() {
     // This is a write operation. We need to allow it though because if any Configuration in
     // current JVM context calls addDefaultResource, this forces a reload of all Configurations
     // (all Configurations are 'registered' by the default constructor. Rather than turn
@@ -100,10 +100,12 @@ class ReadOnlyConfiguration extends Configuration {
     return conf.get(name);
   }

+  // Do not add @Override because it is not in Hadoop 2.6.5
   public void setAllowNullValueProperties(boolean val) {
     throw new UnsupportedOperationException("Read-only Configuration");
   }

+  @Override
   public String getTrimmed(String name) {
     return conf.getTrimmed(name);
   }
@@ -129,12 +131,12 @@ class ReadOnlyConfiguration extends Configuration {
   }

   @Override
-  public void unset(String name) {
+  public synchronized void unset(String name) {
     throw new UnsupportedOperationException("Read-only Configuration");
   }

   @Override
-  public void setIfUnset(String name, String value) {
+  public synchronized void setIfUnset(String name, String value) {
     throw new UnsupportedOperationException("Read-only Configuration");
   }
@@ -239,7 +241,7 @@ class ReadOnlyConfiguration extends Configuration {
   }

   @Override
-  public String[] getPropertySources(String name) {
+  public synchronized String[] getPropertySources(String name) {
     return conf.getPropertySources(name);
   }
@@ -326,7 +328,7 @@ class ReadOnlyConfiguration extends Configuration {
   }

   @Override
-  public Class<?>[] getClasses(String name, Class<?>[] defaultValue) {
+  public Class<?>[] getClasses(String name, Class<?>... defaultValue) {
     return conf.getClasses(name, defaultValue);
   }
@@ -422,7 +424,7 @@ class ReadOnlyConfiguration extends Configuration {
   }

   @Override
-  public void setQuietMode(boolean quietmode) {
+  public synchronized void setQuietMode(boolean quietmode) {
     throw new UnsupportedOperationException("Read-only Configuration");
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
index b7c56e0714..9bc072a048 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
@@ -119,6 +119,7 @@ final public class FilterWrapper extends Filter {
     return filterCell(c);
   }

+  @Override
   public ReturnCode filterCell(final Cell c) throws IOException {
     return this.filter.filterCell(c);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index b89470fb4a..9ea67c1f13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -235,16 +235,15 @@ public class HFileSystem extends FilterFileSystem {
     }
   }

-  /**
+  /**
    * Returns a brand new instance of the FileSystem. It does not use
    * the FileSystem.Cache. In newer versions of HDFS, we can directly
    * invoke FileSystem.newInstance(Configuration).
-   *
+   *
    * @param conf Configuration
    * @return A new instance of the filesystem
    */
-  private static FileSystem newInstanceFileSystem(Configuration conf)
-      throws IOException {
+  private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
     URI uri = FileSystem.getDefaultUri(conf);
     FileSystem fs = null;
     Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
@@ -361,47 +360,43 @@ public class HFileSystem extends FilterFileSystem {

   private static ClientProtocol createReorderingProxy(final ClientProtocol cp,
       final ReorderBlocks lrb, final Configuration conf) {
-    return (ClientProtocol) Proxy.newProxyInstance
-        (cp.getClass().getClassLoader(),
-            new Class[]{ClientProtocol.class, Closeable.class},
-            new InvocationHandler() {
-              public Object invoke(Object proxy, Method method,
-                  Object[] args) throws Throwable {
-                try {
-                  if ((args == null || args.length == 0)
-                      && "close".equals(method.getName())) {
-                    RPC.stopProxy(cp);
-                    return null;
-                  } else {
-                    Object res = method.invoke(cp, args);
-                    if (res != null && args != null && args.length == 3
-                        && "getBlockLocations".equals(method.getName())
-                        && res instanceof LocatedBlocks
-                        && args[0] instanceof String
-                        && args[0] != null) {
-                      lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
-                    }
-                    return res;
-                  }
-                } catch (InvocationTargetException ite) {
-                  // We will have this for all the exception, checked on not, sent
-                  // by any layer, including the functional exception
-                  Throwable cause = ite.getCause();
-                  if (cause == null){
-                    throw new RuntimeException(
-                        "Proxy invocation failed and getCause is null", ite);
-                  }
-                  if (cause instanceof UndeclaredThrowableException) {
-                    Throwable causeCause = cause.getCause();
-                    if (causeCause == null) {
-                      throw new RuntimeException("UndeclaredThrowableException had null cause!");
-                    }
-                    cause = cause.getCause();
-                  }
-                  throw cause;
+    return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(),
+      new Class[]{ClientProtocol.class, Closeable.class}, new InvocationHandler() {
+        @Override
+        public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+          try {
+            if ((args == null || args.length == 0) && "close".equals(method.getName())) {
+              RPC.stopProxy(cp);
+              return null;
+            } else {
+              Object res = method.invoke(cp, args);
+              if (res != null && args != null && args.length == 3
+                  && "getBlockLocations".equals(method.getName())
+                  && res instanceof LocatedBlocks
+                  && args[0] instanceof String
+                  && args[0] != null) {
+                lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
               }
+              return res;
             }
-          });
+          } catch (InvocationTargetException ite) {
+            // We will have this for all the exception, checked on not, sent
+            // by any layer, including the functional exception
+            Throwable cause = ite.getCause();
+            if (cause == null){
+              throw new RuntimeException("Proxy invocation failed and getCause is null", ite);
+            }
+            if (cause instanceof UndeclaredThrowableException) {
+              Throwable causeCause = cause.getCause();
+              if (causeCause == null) {
+                throw new RuntimeException("UndeclaredThrowableException had null cause!");
+              }
+              cause = cause.getCause();
+            }
+            throw cause;
+          }
+        }
+      });
   }

   /**
@@ -424,6 +419,7 @@ public class HFileSystem extends FilterFileSystem {
    * datanode is actually dead, so if we use it it will timeout.
    */
   static class ReorderWALBlocks implements ReorderBlocks {
+    @Override
     public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
         throws IOException {
@@ -481,6 +477,7 @@ public class HFileSystem extends FilterFileSystem {
    * createNonRecursive. This is a hadoop bug and when it is fixed in Hadoop,
    * this definition will go away.
    */
+  @Override
   @SuppressWarnings("deprecation")
   public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
index 5fd9d364cc..6c73405f6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
@@ -22,15 +22,16 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
+import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -73,7 +74,7 @@ public class FSDataInputStreamWrapper implements Closeable {
    */
   private volatile FSDataInputStream stream = null;
   private volatile FSDataInputStream streamNoFsChecksum = null;
-  private Object streamNoFsChecksumFirstCreateLock = new Object();
+  private final Object streamNoFsChecksumFirstCreateLock = new Object();

   // The configuration states that we should validate hbase checksums
   private boolean useHBaseChecksumConfigured;
@@ -86,7 +87,7 @@ public class FSDataInputStreamWrapper implements Closeable {

   // In the case of a checksum failure, do these many succeeding
   // reads without hbase checksum verification.
-  private volatile int hbaseChecksumOffCount = -1;
+  private AtomicInteger hbaseChecksumOffCount = new AtomicInteger(-1);

   private Boolean instanceOfCanUnbuffer = null;
   // Using reflection to get org.apache.hadoop.fs.CanUnbuffer#unbuffer method to avoid compilation
@@ -216,7 +217,7 @@ public class FSDataInputStreamWrapper implements Closeable {
     }
     if (!partOfConvoy) {
       this.useHBaseChecksum = false;
-      this.hbaseChecksumOffCount = offCount;
+      this.hbaseChecksumOffCount.set(offCount);
     }
     return this.stream;
   }
@@ -224,7 +225,7 @@ public class FSDataInputStreamWrapper implements Closeable {
   /** Report that checksum was ok, so we may ponder going back to HBase checksum. */
   public void checksumOk() {
     if (this.useHBaseChecksumConfigured && !this.useHBaseChecksum
-        && (this.hbaseChecksumOffCount-- < 0)) {
+        && (this.hbaseChecksumOffCount.getAndDecrement() < 0)) {
       // The stream we need is already open (because we were using HBase checksum in the past).
       assert this.streamNoFsChecksum != null;
       this.useHBaseChecksum = true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index 80207eb73e..f30d488d92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -129,35 +129,41 @@ public class HalfStoreFileReader extends StoreFileReader {
       final HFileScanner delegate = s;
       public boolean atEnd = false;

+      @Override
       public Cell getKey() {
         if (atEnd) return null;
         return delegate.getKey();
       }

+      @Override
       public String getKeyString() {
         if (atEnd) return null;
         return delegate.getKeyString();
       }

+      @Override
       public ByteBuffer getValue() {
         if (atEnd) return null;
         return delegate.getValue();
       }

+      @Override
       public String getValueString() {
         if (atEnd) return null;
         return delegate.getValueString();
       }

+      @Override
       public Cell getCell() {
         if (atEnd) return null;
         return delegate.getCell();
       }

+      @Override
       public boolean next() throws IOException {
         if (atEnd) return false;
@@ -200,10 +206,12 @@ public class HalfStoreFileReader extends StoreFileReader {
         return (this.delegate.getReader().getComparator().compare(splitCell, getKey())) > 0;
       }

+      @Override
       public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() {
         return this.delegate.getReader();
       }

+      @Override
       public boolean isSeeked() {
         return this.delegate.isSeeked();
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
index 6dbfd2ff76..6dce132774 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java
@@ -222,6 +222,7 @@ public class Reference {
     return Arrays.hashCode(splitkey) + region.hashCode();
   }

+  @Override
   public boolean equals(Object o) {
     if (this == o) return true;
     if (o == null) return false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
index 5f4bb76d6f..1645d68be6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
@@ -507,6 +507,7 @@ public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput {
    * @param syncBlock will call hsync if true, otherwise hflush.
    * @return A CompletableFuture that hold the acked length after flushing.
    */
+  @Override
   public CompletableFuture<Long> flush(boolean syncBlock) {
     CompletableFuture<Long> future = new CompletableFuture<>();
     flush0(future, syncBlock);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 50d85084c3..dccfe39ec0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -126,6 +126,7 @@ public interface BlockCache extends Iterable<CachedBlock> {
   /**
    * @return Iterator over the blocks in the cache.
    */
+  @Override
   Iterator<CachedBlock> iterator();

   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
index 768b37f42b..2aceed7512 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java
@@ -156,6 +156,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase
     return result;
   }

+  @Override
   public boolean supportsAutoLoading() {
     return true;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 70a3d4d90c..a0d3df38f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -545,7 +545,7 @@ public class FixedFileTrailer {
     try {
       // If null, it should be the Bytes.BYTES_RAWCOMPARATOR
       if (klass != null) {
-        CellComparator comp = klass.newInstance();
+        CellComparator comp = klass.getDeclaredConstructor().newInstance();
         // if the name wasn't one of the legacy names, maybe its a legit new
         // kind of comparator.
         comparatorClassName = klass.getName();
@@ -589,12 +589,8 @@ public class FixedFileTrailer {
   public static CellComparator createComparator(
       String comparatorClassName) throws IOException {
     try {
-      Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
-      return comparatorClass != null ? comparatorClass.newInstance() : null;
-    } catch (InstantiationException e) {
-      throw new IOException("Comparator class " + comparatorClassName +
-        " is not instantiable", e);
-    } catch (IllegalAccessException e) {
+      return getComparatorClass(comparatorClassName).getDeclaredConstructor().newInstance();
+    } catch (Exception e) {
       throw new IOException("Comparator class " + comparatorClassName +
         " is not instantiable", e);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 992ebbde59..567441457a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -255,42 +255,43 @@ public class HFileBlock implements Cacheable {
    */
  static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
      new CacheableDeserializer<Cacheable>() {
-      public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+    @Override
+    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
         throws IOException {
-        // The buf has the file block followed by block metadata.
-        // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
-        buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
-        // Get a new buffer to pass the HFileBlock for it to 'own'.
-        ByteBuff newByteBuff;
-        if (reuse) {
-          newByteBuff = buf.slice();
-        } else {
-          int len = buf.limit();
-          newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
-          newByteBuff.put(0, buf, buf.position(), len);
-        }
-        // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-        buf.position(buf.limit());
-        buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
-        boolean usesChecksum = buf.get() == (byte)1;
-        long offset = buf.getLong();
-        int nextBlockOnDiskSize = buf.getInt();
-        HFileBlock hFileBlock =
-            new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-        return hFileBlock;
-      }
+      // The buf has the file block followed by block metadata.
+      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
+      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
+      // Get a new buffer to pass the HFileBlock for it to 'own'.
+      ByteBuff newByteBuff;
+      if (reuse) {
+        newByteBuff = buf.slice();
+      } else {
+        int len = buf.limit();
+        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
+        newByteBuff.put(0, buf, buf.position(), len);
+      }
+      // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
+      buf.position(buf.limit());
+      buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
+      boolean usesChecksum = buf.get() == (byte) 1;
+      long offset = buf.getLong();
+      int nextBlockOnDiskSize = buf.getInt();
+      HFileBlock hFileBlock =
+          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
+      return hFileBlock;
+    }

-      @Override
-      public int getDeserialiserIdentifier() {
-        return DESERIALIZER_IDENTIFIER;
-      }
+    @Override
+    public int getDeserialiserIdentifier() {
+      return DESERIALIZER_IDENTIFIER;
+    }

-      @Override
-      public HFileBlock deserialize(ByteBuff b) throws IOException {
-        // Used only in tests
-        return deserialize(b, false, MemoryType.EXCLUSIVE);
-      }
-    };
+    @Override
+    public HFileBlock deserialize(ByteBuff b) throws IOException {
+      // Used only in tests
+      return deserialize(b, false, MemoryType.EXCLUSIVE);
+    }
+  };

   private static final int DESERIALIZER_IDENTIFIER;
   static {
@@ -1480,6 +1481,7 @@ public class HFileBlock implements Cacheable {
       this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext);
     }

+    @Override
     public BlockIterator blockRange(final long startOffset, final long endOffset) {
       final FSReader owner = this; // handle for inner class
       return new BlockIterator() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index a16565e7dd..1f591a09af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -947,7 +947,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       Cell ret;
       int cellBufSize = getKVBufSize();
-      long seqId = 0l;
+      long seqId = 0L;
       if (this.reader.shouldIncludeMemStoreTS()) {
         seqId = currMemstoreTS;
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
index 032c1adc4b..a2a35fef37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
@@ -166,5 +166,6 @@ public interface HFileScanner extends Shipper, Closeable {
   /**
    * Close this HFile scanner and do necessary cleanup.
    */
+  @Override
   void close();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 284447ae5a..d26b90ad27 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -448,6 +448,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
    * @param cacheKey block's cache key
    * @param buf block buffer
    */
+  @Override
   public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
     cacheBlock(cacheKey, buf, false);
   }
@@ -794,6 +795,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
       return totalSize;
     }

+    @Override
     public int compareTo(BlockBucket that) {
       return Long.compare(this.overflow(), that.overflow());
     }
@@ -970,6 +972,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
    *
    * <p>Includes: total accesses, hits, misses, evicted blocks, and runs
    * of the eviction processes.
    */
+  @Override
   public CacheStats getStats() {
     return this.stats;
   }
@@ -1096,6 +1099,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
     return (long) Math.floor(this.maxSize * this.memoryFactor * this.minFactor);
   }

+  @Override
   public void shutdown() {
     if (victimHandler != null) {
       victimHandler.shutdown();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
index 21b3bfd7a1..32a277d462 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
@@ -90,6 +90,7 @@ public class LruCachedBlock implements HeapSize, Comparable<LruCachedBlock> {
     return this.cachedTime;
   }

+  @Override
   public long heapSize() {
     return size;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
index bed08feb81..4c67c9a961 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
@@ -103,6 +103,7 @@ public class LruCachedBlockQueue implements HeapSize {
    * Total size of all elements in this queue.
    * @return size of all elements currently in queue, in bytes
    */
+  @Override
   public long heapSize() {
     return heapSize;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 7bbb4ed466..e31b1cb834 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -414,6 +414,7 @@ public final class BucketAllocator {
     }
   }

+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
     for (int i = 0; i < buckets.length; ++i) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index d07c30d9f3..bd2b9c8797 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -36,6 +36,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Objects;
 import java.util.PriorityQueue;
 import java.util.Set;
 import java.util.concurrent.ArrayBlockingQueue;
@@ -904,6 +905,7 @@ public class BucketCache implements BlockCache, HeapSize {
       this.writerEnabled = false;
     }

+    @Override
     public void run() {
       List<RAMQueueEntry> entries = new ArrayList<>();
       try {
@@ -1395,10 +1397,22 @@ public class BucketCache implements BlockCache, HeapSize {
     }

     @Override
-    public boolean equals(Object that) {
-      return this == that;
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      BucketEntryGroup that = (BucketEntryGroup) o;
+      return totalSize == that.totalSize && bucketSize == that.bucketSize
+          && Objects.equals(queue, that.queue);
     }

+    @Override
+    public int hashCode() {
+      return Objects.hash(queue, totalSize, bucketSize);
+    }
   }

   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
index fa39202cd0..29721ab659 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java
@@ -59,6 +59,7 @@ public class CachedEntryQueue {
     }
     queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {

+      @Override
       public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
           Entry<BlockCacheKey, BucketEntry> entry2) {
         return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
index c340c066b2..915b82df42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BufferChain.java
@@ -101,7 +101,7 @@ class BufferChain {
     try {
       long ret = channel.write(buffers, bufferOffset, bufCount);
       if (ret > 0) {
-        remaining -= ret;
+        remaining = (int) (remaining - ret);
       }
       return ret;
     } finally {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java
index 9a01a0a254..eaea34deef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java
@@ -91,6 +91,7 @@ public class FastPathBalancedQueueRpcExecutor extends BalancedQueueRpcExecutor {
     this.fastPathHandlerStack = fastPathHandlerStack;
   }

+  @Override
   protected CallRunner getCallRunner() throws InterruptedException {
     // Get a callrunner if one in the Q.
     CallRunner cr = this.q.poll();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index 096efa39b0..17bb362b49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -193,14 +193,15 @@ abstract class ServerRpcConnection implements Closeable {
     String className = header.getCellBlockCodecClass();
     if (className == null || className.length() == 0) return;
     try {
-      this.codec = (Codec)Class.forName(className).newInstance();
+      this.codec = (Codec)Class.forName(className).getDeclaredConstructor().newInstance();
     } catch (Exception e) {
       throw new UnsupportedCellCodecException(className, e);
     }
     if (!header.hasCellBlockCompressorClass()) return;
     className = header.getCellBlockCompressorClass();
     try {
-      this.compressionCodec = (CompressionCodec)Class.forName(className).newInstance();
+      this.compressionCodec =
+          (CompressionCodec)Class.forName(className).getDeclaredConstructor().newInstance();
     } catch (Exception e) {
       throw new UnsupportedCompressionCodecException(className, e);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index b14c934898..13a3cf7171 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -548,6 +548,7 @@ public class SimpleRpcServer extends RpcServer {
    * The number of open RPC conections
    * @return the number of open rpc connections
    */
+  @Override
   public int getNumOpenConnections() {
     return connectionManager.size();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 6b87194372..5e97204f40 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -118,10 +118,8 @@ public class ClusterStatusPublisher extends ScheduledChore {
     this.master = master;
     this.messagePeriod = conf.getInt(STATUS_PUBLISH_PERIOD, DEFAULT_STATUS_PUBLISH_PERIOD);
     try {
-      this.publisher = publisherClass.newInstance();
-    } catch (InstantiationException e) {
-      throw new IOException("Can't create publisher " + publisherClass.getName(), e);
-    } catch (IllegalAccessException e) {
+      this.publisher = publisherClass.getDeclaredConstructor().newInstance();
+    } catch (Exception e) {
       throw new IOException("Can't create publisher " + publisherClass.getName(), e);
     }
     this.publisher.connect(conf);
@@ -166,7 +164,8 @@ public class ClusterStatusPublisher extends ScheduledChore {
         .build());
   }

-  protected void cleanup() {
+  @Override
+  protected synchronized void cleanup() {
     connected = false;
     publisher.close();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index db04c606a9..116d24e721 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -155,6 +155,7 @@ public class DeadServer {
     }
   }

+  @Override
   public synchronized String toString() {
     StringBuilder sb = new StringBuilder();
     for (ServerName sn : deadServers.keySet()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
index 3ec70d3b03..a5910250bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
@@ -69,11 +69,12 @@ public class HMasterCommandLine extends ServerCommandLine {
     this.masterClass = masterClass;
   }

+  @Override
   protected String getUsage() {
     return USAGE;
   }

-
+  @Override
   public int run(String args[]) throws Exception {
     Options opt = new Options();
     opt.addOption("localRegionServers", true,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index f1a05937c7..917da08488 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -156,6 +156,7 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
    * Notification that config has changed
    * @param conf
    */
+  @Override
   void onConfigurationChange(Configuration conf);

   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
index 18aab6e789..f25f3bfc4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
@@ -52,6 +52,7 @@ public class MasterAnnotationReadingPriorityFunction extends AnnotationReadingPr
     super(rpcServices, clz);
   }

+  @Override
   public int getPriority(RPCProtos.RequestHeader header, Message param, User user) {
     // Yes this is copy pasted from the base class but it keeps from having to look in the
     // annotatedQos table twice something that could get costly since this is called for
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 10e1d0a539..8396145176 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -133,6 +133,7 @@ public class MasterCoprocessorHost
    * @return An instance of MasterServices, an object NOT for general user-space Coprocessor
    *         consumption.
*/ + @Override public MasterServices getMasterServices() { return this.masterServices; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java index 8a7c4e1bf8..6c5d677a86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java @@ -89,7 +89,7 @@ public class MobCompactionChore extends ScheduledChore { } @Override - protected void cleanup() { + protected synchronized void cleanup() { super.cleanup(); pool.shutdown(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java index 2f2d536ab5..29218e26f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java @@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.Private public class RegionServerTracker extends ZKListener { private static final Logger LOG = LoggerFactory.getLogger(RegionServerTracker.class); - private NavigableMap<ServerName, RegionServerInfo> regionServers = new TreeMap<>(); + private final NavigableMap<ServerName, RegionServerInfo> regionServers = new TreeMap<>(); private ServerManager serverManager; private MasterServices server; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index d1c1612f2e..2b88fb1326 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -637,7 +637,7 @@ public class SplitLogManager { public enum TerminationStatus { IN_PROGRESS("in_progress"), SUCCESS("success"), FAILURE("failure"), DELETED("deleted"); - String statusMsg; + final String statusMsg; TerminationStatus(String msg) { statusMsg = msg; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index 38104038d7..8c597769d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -612,7 +612,7 @@ public class MergeTableRegionsProcedure final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); for (String family: regionFs.getFamilies()) { - final ColumnFamilyDescriptor hcd = htd.getColumnFamily(family.getBytes()); + final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family)); final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family); if (storeFiles != null && storeFiles.size() > 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 1cf9a54459..fa94495e0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -33,6 +33,7 @@ import java.util.SortedSet; import java.util.TreeSet; import
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; @@ -864,7 +865,7 @@ public class RegionStates { private final RegionStateNode regionNode; private volatile Exception exception = null; - private volatile int retries = 0; + private AtomicInteger retries = new AtomicInteger(); public RegionFailedOpen(final RegionStateNode regionNode) { this.regionNode = regionNode; @@ -879,11 +880,11 @@ public class RegionStates { } public int incrementAndGetRetries() { - return ++this.retries; + return this.retries.incrementAndGet(); } public int getRetries() { - return retries; + return retries.get(); } public void setException(final Exception exception) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java index b3fbc57e3c..a72478c383 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java @@ -86,7 +86,7 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements } @Override - public void setMasterServices(MasterServices masterServices) { + public synchronized void setMasterServices(MasterServices masterServices) { super.setMasterServices(masterServices); fnm = masterServices.getFavoredNodesManager(); } @@ -692,7 +692,8 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements * implementation. For the misplaced regions, we assign a bogus server to it and AM takes care. */ @Override - public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState) { + public synchronized List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState) { if (this.services != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index a9b1bb783e..07e9600e5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -72,21 +72,22 @@ class RegionLocationFinder { private CacheLoader<RegionInfo, HDFSBlocksDistribution> loader = new CacheLoader<RegionInfo, HDFSBlocksDistribution>() { - public ListenableFuture<HDFSBlocksDistribution> reload(final RegionInfo hri, - HDFSBlocksDistribution oldValue) throws Exception { - return executor.submit(new Callable<HDFSBlocksDistribution>() { - @Override - public HDFSBlocksDistribution call() throws Exception { - return internalGetTopBlockLocation(hri); - } - }); - } - + @Override + public ListenableFuture<HDFSBlocksDistribution> reload(final RegionInfo hri, + HDFSBlocksDistribution oldValue) throws Exception { + return executor.submit(new Callable<HDFSBlocksDistribution>() { @Override - public HDFSBlocksDistribution load(RegionInfo key) throws Exception { - return internalGetTopBlockLocation(key); + public HDFSBlocksDistribution call() throws Exception { + return internalGetTopBlockLocation(hri); } - }; + }); + } + + @Override + public HDFSBlocksDistribution load(RegionInfo key) throws Exception { + return internalGetTopBlockLocation(key); + } + }; // The cache for where regions are located.
private LoadingCache<RegionInfo, HDFSBlocksDistribution> cache = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index f53683fb38..b0d3f19ad1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -106,6 +106,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { } + @Override public void setClusterLoad(Map<TableName, Map<ServerName, List<RegionInfo>>> clusterLoad){ serverLoadList = new ArrayList<>(); float sum = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index 6b4f943248..dca9cbbbec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -349,8 +349,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { // Allow turning this feature off if the locality cost is not going to // be used in any computations. RegionLocationFinder finder = null; - if (this.localityCost != null && this.localityCost.getMultiplier() > 0 - || this.rackLocalityCost != null && this.rackLocalityCost.getMultiplier() > 0) { + if ((this.localityCost != null && this.localityCost.getMultiplier() > 0) + || (this.rackLocalityCost != null && this.rackLocalityCost.getMultiplier() > 0)) { finder = this.regionFinder; } @@ -1401,7 +1401,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { // Now if we found a region load get the type of cost that was requested.
if (regionLoadList != null) { - cost += getRegionLoadCost(regionLoadList); + cost = (long) (cost + getRegionLoadCost(regionLoadList)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java index 775d8f9603..21f825132f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java @@ -205,7 +205,7 @@ public abstract class CleanerChore extends Schedu Class c = Class.forName(className).asSubclass( FileCleanerDelegate.class); @SuppressWarnings("unchecked") - T cleaner = (T) c.newInstance(); + T cleaner = (T) c.getDeclaredConstructor().newInstance(); cleaner.setConf(conf); cleaner.init(this.params); return cleaner; @@ -360,7 +360,7 @@ public abstract class CleanerChore extends Schedu } @Override - public void cleanup() { + public synchronized void cleanup() { for (T lc : this.cleanersChain) { try { lc.stop("Exiting"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java index 640c8f794f..08640a79a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java @@ -182,7 +182,7 @@ public class HFileCleaner extends CleanerChore { } @Override - public void cleanup() { + public synchronized void cleanup() { super.cleanup(); stopHFileDeleteThreads(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java index db364eefc8..9beed58940 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java @@ -108,7 +108,7 @@ public class LogCleaner extends CleanerChore { } @Override - public void cleanup() { + public synchronized void cleanup() { super.cleanup(); interruptOldWALsCleaner(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java index edf7642574..b4c55f4689 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java @@ -202,7 +202,7 @@ public final class LockProcedure extends Procedure * @return false, so procedure framework doesn't mark this procedure as failure. */ @Override - protected boolean setTimeoutFailure(final MasterProcedureEnv env) { + protected synchronized boolean setTimeoutFailure(final MasterProcedureEnv env) { synchronized (event) { if (LOG.isDebugEnabled()) LOG.debug("Timeout failure " + this.event); if (!event.isReady()) { // Maybe unlock() awakened the event. 
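
[Editor's aside, not part of the patch] A fix repeated throughout this change (ServerRpcConnection, ClusterStatusPublisher, CleanerChore, ProcedureManagerHost, SpaceQuotaSnapshotNotifierFactory) is replacing Class.newInstance() with Class.getDeclaredConstructor().newInstance(). Class.newInstance() propagates whatever the constructor throws, checked exceptions included, with no compile-time checking, which is why error-prone flags it and why it is deprecated since Java 9. A minimal sketch of the replacement, assuming a no-arg constructor (the helper class and method names are illustrative, not from the patch):

    public final class ReflectiveInstantiation {
      private ReflectiveInstantiation() {}

      // getDeclaredConstructor() fails fast with NoSuchMethodException if there is
      // no no-arg constructor; Constructor.newInstance() wraps anything the
      // constructor throws in InvocationTargetException instead of rethrowing it
      // undeclared.
      static <T> T instantiate(Class<T> clazz) throws ReflectiveOperationException {
        return clazz.getDeclaredConstructor().newInstance();
      }
    }

Because every failure mode is now a ReflectiveOperationException or wrapped in one, call sites can collapse the old separate InstantiationException/IllegalAccessException catch arms into a single catch, as the ClusterStatusPublisher hunk above and the ProcedureManagerHost hunk below both do.
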
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java index 09d05e6970..535f2888dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java @@ -71,7 +71,9 @@ public abstract class ProcedurePrepareLatch { } private static class NoopLatch extends ProcedurePrepareLatch { + @Override protected void countDown(final Procedure proc) {} + @Override public void await() throws IOException {} } @@ -80,6 +82,7 @@ public abstract class ProcedurePrepareLatch { private IOException exception = null; + @Override protected void countDown(final Procedure proc) { if (proc.hasException()) { exception = proc.getException().unwrapRemoteIOException(); @@ -87,6 +90,7 @@ public abstract class ProcedurePrepareLatch { latch.countDown(); } + @Override public void await() throws IOException { try { latch.await(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index 65c4d08cb5..ab282d5e00 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -117,6 +117,7 @@ public class RSProcedureDispatcher } } + @Override protected void abortPendingOperations(final ServerName serverName, final Set operations) { // TODO: Replace with a ServerNotOnlineException() @@ -126,10 +127,12 @@ public class RSProcedureDispatcher } } + @Override public void serverAdded(final ServerName serverName) { addNode(serverName); } + @Override public void serverRemoved(final ServerName serverName) { removeNode(serverName); } @@ -138,6 +141,7 @@ public class RSProcedureDispatcher * Base remote call */ protected abstract class AbstractRSRemoteCall implements Callable { + @Override public abstract Void call(); private final ServerName serverName; @@ -269,6 +273,7 @@ public class RSProcedureDispatcher this.remoteProcedures = remoteProcedures; } + @Override public Void call() { request = ExecuteProceduresRequest.newBuilder(); if (LOG.isTraceEnabled()) { @@ -290,11 +295,13 @@ public class RSProcedureDispatcher return null; } + @Override public void dispatchOpenRequests(final MasterProcedureEnv env, final List operations) { request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations)); } + @Override public void dispatchCloseRequests(final MasterProcedureEnv env, final List operations) { for (RegionCloseOperation op: operations) { @@ -471,11 +478,13 @@ public class RSProcedureDispatcher return null; } + @Override public void dispatchOpenRequests(final MasterProcedureEnv env, final List operations) { submitTask(new OpenRegionRemoteCall(serverName, operations)); } + @Override public void dispatchCloseRequests(final MasterProcedureEnv env, final List operations) { for (RegionCloseOperation op: operations) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java index a8475f0f6e..559863e939 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java @@ -86,6 +86,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { return false; } + @Override public void setConf(final Configuration conf) { super.setConf(conf); try { @@ -95,6 +96,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { Path rootDir = FSUtils.getRootDir(conf); cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod, "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() { + @Override public Collection filesUnderSnapshot(final Path snapshotDir) throws IOException { return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java index 397570cca6..7436d9c201 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java @@ -54,6 +54,7 @@ public class CachedMobFile extends MobFile implements Comparable this.accessCount = accessCount; } + @Override public int compareTo(CachedMobFile that) { if (this.accessCount == that.accessCount) return 0; return this.accessCount < that.accessCount ? 1 : -1; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java index 053cba641f..120f11e65a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java @@ -86,6 +86,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", justification="Intentional") + @Override public int run(String[] args) throws Exception { if (args.length != 2) { printUsage(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java index aaf545b1ff..ab917a2d92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java @@ -294,6 +294,7 @@ public class PartitionedMobCompactionRequest extends MobCompactionRequest { this.endKey = endKey; } + @Override public int compareTo(CompactionDelPartitionId o) { /* * 1). Compare the start key, if the k1 < k2, then k1 is less diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java index 6b90e6b1ed..1b6ad91bc8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java @@ -74,6 +74,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * monitored Handler. * @return the queue timestamp or -1 if there is no RPC currently running. 
*/ + @Override public long getRPCQueueTime() { if (getState() != State.RUNNING) { return -1; @@ -86,6 +87,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * monitored Handler. * @return the start timestamp or -1 if there is no RPC currently running. */ + @Override public long getRPCStartTime() { if (getState() != State.RUNNING) { return -1; @@ -98,6 +100,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * by this Handler. * @return a string representing the method call without parameters */ + @Override public synchronized String getRPC() { return getRPC(false); } @@ -108,6 +111,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * @param withParams toggle inclusion of parameters in the RPC String * @return A human-readable string representation of the method call. */ + @Override public synchronized String getRPC(boolean withParams) { if (getState() != State.RUNNING) { // no RPC is currently running @@ -132,6 +136,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * by this Handler. * @return A human-readable string representation of the method call. */ + @Override public long getRPCPacketLength() { if (getState() != State.RUNNING || packet == null) { // no RPC is currently running, or we don't have an RPC's packet info @@ -146,6 +151,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * @return A human-readable string representation of the address and port * of the client. */ + @Override public String getClient() { return clientAddress + ":" + remotePort; } @@ -155,6 +161,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * RPC call. * @return true if the monitored handler is currently servicing an RPC call. */ + @Override public boolean isRPCRunning() { return getState() == State.RUNNING; } @@ -166,6 +173,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * @return true if the monitored handler is currently servicing an RPC call * to a database command. */ + @Override public synchronized boolean isOperationRunning() { if(!isRPCRunning()) { return false; @@ -183,6 +191,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * @param methodName The name of the method that will be called by the RPC. * @param params The parameters that will be passed to the indicated method. */ + @Override public synchronized void setRPC(String methodName, Object [] params, long queueTime) { this.methodName = methodName; @@ -197,6 +206,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * that it can later compute its size if asked for it. 
* @param param The protobuf received by the RPC for this call */ + @Override public void setRPCPacket(Message param) { this.packet = param; } @@ -206,6 +216,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl * @param clientAddress the address of the current client * @param remotePort the port from which the client connected */ + @Override public void setConnection(String clientAddress, int remotePort) { this.clientAddress = clientAddress; this.remotePort = remotePort; @@ -218,6 +229,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl this.packet = null; } + @Override public synchronized Map toMap() { // only include RPC info if the Handler is actively servicing an RPC call Map map = super.toMap(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java index b3869f4519..bedb5e28fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java @@ -154,6 +154,7 @@ class MonitoredTaskImpl implements MonitoredTask { * Force the completion timestamp backwards so that * it expires now. */ + @Override public void expireNow() { stateTime -= 180 * 1000; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java index 6749d2f19e..4aff779097 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java @@ -319,7 +319,7 @@ public class TaskMonitor { OPERATION("operation"), ALL("all"); - private String type; + private final String type; private TaskType(String type) { this.type = type.toLowerCase(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java index 49b344f0d9..fe3edfa63d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java @@ -103,7 +103,7 @@ public class Procedure implements Callable, ForeignExceptionListener { // /** lock to prevent nodes from acquiring and then releasing before we can track them */ - private Object joinBarrierLock = new Object(); + private final Object joinBarrierLock = new Object(); private final List acquiringMembers; private final List inBarrierMembers; private final HashMap dataFromFinishedMembers; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java index af4d2d7104..9ebb1d781d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java @@ -88,11 +88,9 @@ public abstract class ProcedureManagerHost { E impl; Object o = null; try { - o = implClass.newInstance(); + o = implClass.getDeclaredConstructor().newInstance(); impl = (E)o; - } catch (InstantiationException e) { - throw new IOException(e); - } catch (IllegalAccessException e) { + } catch (Exception e) { throw new IOException(e); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java index 6416e6a65b..d15f5acd54 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java @@ -153,6 +153,7 @@ abstract public class Subprocedure implements Callable { * Subprocedure, ForeignException)}. */ @SuppressWarnings("finally") + @Override final public Void call() { LOG.debug("Starting subprocedure '" + barrierName + "' with timeout " + executionTimeoutTimer.getMaxTime() + "ms"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java index c1fb8f5c1a..71ba28e525 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java @@ -19,19 +19,21 @@ package org.apache.hadoop.hbase.procedure; import java.io.IOException; import java.io.InterruptedIOException; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.List; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; + /** * ZooKeeper based {@link ProcedureCoordinatorRpcs} for a {@link ProcedureCoordinator} */ @@ -218,8 +220,8 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { } else { dataFromMember = Arrays.copyOfRange(dataFromMember, ProtobufUtil.lengthOfPBMagic(), dataFromMember.length); - LOG.debug("Finished data from procedure '" + procName - + "' member '" + member + "': " + new String(dataFromMember)); + LOG.debug("Finished data from procedure '{}' member '{}': {}", procName, member, + new String(dataFromMember, StandardCharsets.UTF_8)); coordinator.memberFinishedBarrier(procName, member, dataFromMember); } } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java index ea41ae8972..f29d133d84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java @@ -348,6 +348,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { } } + @Override public void start(final String memberName, final ProcedureMember listener) { LOG.debug("Starting procedure member '" + memberName + "'"); this.member = listener; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java index 976e36b49b..9eb3fb36dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java @@ -157,6 +157,7 @@ public abstract class ZKProcedureUtil return ZNodePaths.joinZNode(controller.abortZnode, opInstanceName); } + @Override public ZKWatcher getWatcher() { return watcher; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index e68a1ce627..6783e7da0b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -455,7 +455,7 @@ public class MasterQuotaManager implements RegionStateListener { } private static class NamedLock { - private HashSet locks = new HashSet<>(); + private final HashSet locks = new HashSet<>(); public void lock(final T name) throws InterruptedException { synchronized (locks) { @@ -501,6 +501,7 @@ public class MasterQuotaManager implements RegionStateListener { return time; } + @Override public boolean equals(Object o) { if (o instanceof SizeSnapshotWithTimestamp) { SizeSnapshotWithTimestamp other = (SizeSnapshotWithTimestamp) o; @@ -509,6 +510,7 @@ public class MasterQuotaManager implements RegionStateListener { return false; } + @Override public int hashCode() { HashCodeBuilder hcb = new HashCodeBuilder(); return hcb.append(size).append(time).toHashCode(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java index 5e20ce9b0f..869ead370a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaObserverChore.java @@ -386,7 +386,8 @@ public class QuotaObserverChore extends ScheduledChore { for (TableName tableInNS : tablesByNamespace.get(namespace)) { final SpaceQuotaSnapshot tableQuotaSnapshot = tableSnapshotStore.getCurrentState(tableInNS); - final boolean hasTableQuota = QuotaSnapshotStore.NO_QUOTA != tableQuotaSnapshot; + final boolean hasTableQuota = + !Objects.equals(QuotaSnapshotStore.NO_QUOTA, tableQuotaSnapshot); if (hasTableQuota && tableQuotaSnapshot.getQuotaStatus().isInViolation()) { // Table-level quota violation policy is being applied here. 
if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java index 0c856b1bf8..852d8a68f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RateLimiter.java @@ -100,6 +100,7 @@ public abstract class RateLimiter { this.avail = limit; } + @Override public String toString() { String rateLimiter = this.getClass().getSimpleName(); if (getLimit() == Long.MAX_VALUE) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index 2d4414c7a0..b0bdedeb5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -53,7 +53,7 @@ public class RegionServerSpaceQuotaManager { private SpaceQuotaRefresherChore spaceQuotaRefresher; private AtomicReference> currentQuotaSnapshots; private boolean started = false; - private ConcurrentHashMap enforcedPolicies; + private final ConcurrentHashMap enforcedPolicies; private SpaceViolationPolicyEnforcementFactory factory; public RegionServerSpaceQuotaManager(RegionServerServices rsServices) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java index 3fb7ad37c1..f19595fa4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotNotifierFactory.java @@ -54,8 +54,8 @@ public class SpaceQuotaSnapshotNotifierFactory { .getClass(SNAPSHOT_NOTIFIER_KEY, SNAPSHOT_NOTIFIER_DEFAULT, SpaceQuotaSnapshotNotifier.class); try { - return clz.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { + return clz.getDeclaredConstructor().newInstance(); + } catch (Exception e) { throw new IllegalArgumentException("Failed to instantiate the implementation", e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index 8a199089cd..6dbe0a871a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -54,7 +54,7 @@ public abstract class AbstractMemStore implements MemStore { // Used to track when to flush private volatile long timeOfOldestEdit; - public final static long FIXED_OVERHEAD = ClassSize.OBJECT + public final static long FIXED_OVERHEAD = (long) ClassSize.OBJECT + (4 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG); // snapshotId, timeOfOldestEdit diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java index 232ffe35ca..9a866a1559 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AdaptiveMemStoreCompactionStrategy.java @@ -101,10 +101,13 @@ public class AdaptiveMemStoreCompactionStrategy extends MemStoreCompactionStrate public void resetStats() { compactionProbability = initialCompactionProbability; } + + @Override protected Action getMergingAction() { return Action.MERGE_COUNT_UNIQUE_KEYS; } + @Override protected Action getFlattenAction() { return Action.FLATTEN; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java index 523ccf2e39..bf9b1915f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java @@ -82,7 +82,7 @@ public class CellChunkImmutableSegment extends ImmutableSegment { @Override protected long indexEntrySize() { - return (ClassSize.CELL_CHUNK_MAP_ENTRY - KeyValue.FIXED_OVERHEAD); + return ((long) ClassSize.CELL_CHUNK_MAP_ENTRY - KeyValue.FIXED_OVERHEAD); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java index 6159385b7b..a4fe883668 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java @@ -73,113 +73,140 @@ public class CellSet implements NavigableSet { return delegatee; } + @Override public Cell ceiling(Cell e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public Iterator descendingIterator() { return this.delegatee.descendingMap().values().iterator(); } + @Override public NavigableSet descendingSet() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public Cell floor(Cell e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public SortedSet headSet(final Cell toElement) { return headSet(toElement, false); } + @Override public NavigableSet headSet(final Cell toElement, boolean inclusive) { return new CellSet(this.delegatee.headMap(toElement, inclusive), UNKNOWN_NUM_UNIQUES); } + @Override public Cell higher(Cell e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public Iterator iterator() { return this.delegatee.values().iterator(); } + @Override public Cell lower(Cell e) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public Cell pollFirst() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public Cell pollLast() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public SortedSet subSet(Cell fromElement, Cell toElement) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public NavigableSet subSet(Cell fromElement, boolean fromInclusive, Cell toElement, boolean toInclusive) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public SortedSet tailSet(Cell fromElement) { return tailSet(fromElement, true); } + @Override public NavigableSet tailSet(Cell fromElement, boolean inclusive) { return new CellSet(this.delegatee.tailMap(fromElement, inclusive), UNKNOWN_NUM_UNIQUES); } + @Override public Comparator comparator() { throw new 
UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public Cell first() { return this.delegatee.firstEntry().getValue(); } + @Override public Cell last() { return this.delegatee.lastEntry().getValue(); } + @Override public boolean add(Cell e) { return this.delegatee.put(e, e) == null; } + @Override public boolean addAll(Collection c) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public void clear() { this.delegatee.clear(); } + @Override public boolean contains(Object o) { //noinspection SuspiciousMethodCalls return this.delegatee.containsKey(o); } + @Override public boolean containsAll(Collection c) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public boolean isEmpty() { return this.delegatee.isEmpty(); } + @Override public boolean remove(Object o) { return this.delegatee.remove(o) != null; } + @Override public boolean removeAll(Collection c) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public boolean retainAll(Collection c) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } @@ -188,14 +215,17 @@ public class CellSet implements NavigableSet { return this.delegatee.get(kv); } + @Override public int size() { return this.delegatee.size(); } + @Override public Object[] toArray() { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } + @Override public T[] toArray(T[] a) { throw new UnsupportedOperationException(HConstants.NOT_IMPLEMENTED); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index d874b2e779..3cb4103b4d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -143,7 +143,7 @@ public class CompactingMemStore extends AbstractMemStore { factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT); } - inmemoryFlushSize *= factor; + inmemoryFlushSize = (long) (inmemoryFlushSize * factor); LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize + " and immutable segments index to be of type " + indexType); } @@ -365,7 +365,7 @@ public class CompactingMemStore extends AbstractMemStore { MutableSegment activeTmp = active; List pipelineList = pipeline.getSegments(); List snapshotList = snapshot.getAllSegments(); - long order = 1 + pipelineList.size() + snapshotList.size(); + long order = 1L + pipelineList.size() + snapshotList.size(); // The list of elements in pipeline + the active element + the snapshot segment // The order is the Segment ordinal List list = createList((int) order); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index 1aae068e35..8bd990a424 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -279,6 +279,7 @@ public class CompositeImmutableSegment extends ImmutableSegment { /** * Dumps all cells of the segment into the given log */ + @Override void dump(Logger log) { for (ImmutableSegment s : segments) { s.dump(log); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java index 4539ed625e..daae0832fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java @@ -89,6 +89,7 @@ public class DateTieredStoreEngine extends StoreEngine compact(ThroughputController throughputController, User user) throws IOException { if (request instanceof DateTieredCompactionRequest) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java index b3f0a44dc8..26bf6400e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java @@ -61,11 +61,9 @@ public class DefaultStoreFlusher extends StoreFlusher { synchronized (flushLock) { status.setStatus("Flushing " + store + ": creating writer"); // Write the map out to the disk - writer = store.createWriterInTmp(cellsCount, store.getColumnFamilyDescriptor().getCompressionType(), - /* isCompaction = */ false, - /* includeMVCCReadpoint = */ true, - /* includesTags = */ snapshot.isTagsPresent(), - /* shouldDropBehind = */ false); + writer = store.createWriterInTmp(cellsCount, + store.getColumnFamilyDescriptor().getCompressionType(), false, true, + snapshot.isTagsPresent(), false); IOException e = null; try { performFlush(scanner, writer, smallestReadPoint, throughputController); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index d56a1c2b2b..740eb08992 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -26,6 +26,7 @@ import java.util.Map; import java.util.NavigableSet; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -82,15 +83,15 @@ public class HMobStore extends HStore { private MobCacheConfig mobCacheConfig; private Path homePath; private Path mobFamilyPath; - private volatile long cellsCountCompactedToMob = 0; - private volatile long cellsCountCompactedFromMob = 0; - private volatile long cellsSizeCompactedToMob = 0; - private volatile long cellsSizeCompactedFromMob = 0; - private volatile long mobFlushCount = 0; - private volatile long mobFlushedCellsCount = 0; - private volatile long mobFlushedCellsSize = 0; - private volatile long mobScanCellsCount = 0; - private volatile long mobScanCellsSize = 0; + private AtomicLong cellsCountCompactedToMob = new AtomicLong(); + private AtomicLong cellsCountCompactedFromMob = new AtomicLong(); + private AtomicLong cellsSizeCompactedToMob = new AtomicLong(); + private AtomicLong cellsSizeCompactedFromMob = new AtomicLong(); + private AtomicLong mobFlushCount = new AtomicLong(); + private AtomicLong mobFlushedCellsCount = new AtomicLong(); + private AtomicLong mobFlushedCellsSize = new AtomicLong(); + private AtomicLong mobScanCellsCount = new AtomicLong(); + private AtomicLong mobScanCellsSize = new 
AtomicLong(); private ColumnFamilyDescriptor family; private Map> map = new ConcurrentHashMap<>(); private final IdLock keyLock = new IdLock(); @@ -453,76 +454,75 @@ public class HMobStore extends HStore { } public void updateCellsCountCompactedToMob(long count) { - cellsCountCompactedToMob += count; + cellsCountCompactedToMob.addAndGet(count); } public long getCellsCountCompactedToMob() { - return cellsCountCompactedToMob; + return cellsCountCompactedToMob.get(); } public void updateCellsCountCompactedFromMob(long count) { - cellsCountCompactedFromMob += count; + cellsCountCompactedFromMob.addAndGet(count); } public long getCellsCountCompactedFromMob() { - return cellsCountCompactedFromMob; + return cellsCountCompactedFromMob.get(); } public void updateCellsSizeCompactedToMob(long size) { - cellsSizeCompactedToMob += size; + cellsSizeCompactedToMob.addAndGet(size); } public long getCellsSizeCompactedToMob() { - return cellsSizeCompactedToMob; + return cellsSizeCompactedToMob.get(); } public void updateCellsSizeCompactedFromMob(long size) { - cellsSizeCompactedFromMob += size; + cellsSizeCompactedFromMob.addAndGet(size); } public long getCellsSizeCompactedFromMob() { - return cellsSizeCompactedFromMob; + return cellsSizeCompactedFromMob.get(); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT") public void updateMobFlushCount() { - mobFlushCount++; + mobFlushCount.incrementAndGet(); } public long getMobFlushCount() { - return mobFlushCount; + return mobFlushCount.get(); } public void updateMobFlushedCellsCount(long count) { - mobFlushedCellsCount += count; + mobFlushedCellsCount.addAndGet(count); } public long getMobFlushedCellsCount() { - return mobFlushedCellsCount; + return mobFlushedCellsCount.get(); } public void updateMobFlushedCellsSize(long size) { - mobFlushedCellsSize += size; + mobFlushedCellsSize.addAndGet(size); } public long getMobFlushedCellsSize() { - return mobFlushedCellsSize; + return mobFlushedCellsSize.get(); } public void updateMobScanCellsCount(long count) { - mobScanCellsCount += count; + mobScanCellsCount.addAndGet(count); } public long getMobScanCellsCount() { - return mobScanCellsCount; + return mobScanCellsCount.get(); } public void updateMobScanCellsSize(long size) { - mobScanCellsSize += size; + mobScanCellsSize.addAndGet(size); } public long getMobScanCellsSize() { - return mobScanCellsSize; + return mobScanCellsSize.get(); } public byte[] getRefCellTags() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index aa9fa0384c..3d267dd9f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.io.InterruptedIOException; import java.lang.reflect.Constructor; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.text.ParseException; import java.util.AbstractList; import java.util.ArrayList; @@ -1015,7 +1016,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } long storeMaxSequenceId = store.getMaxSequenceId().orElse(0L); - maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), + maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), storeMaxSequenceId); if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) { maxSeqId = storeMaxSequenceId; @@ -5524,7 +5525,7 @@ 
public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi HStore store = this.stores.get(column); if (store == null) { throw new IllegalArgumentException( - "No column family : " + new String(column) + " available"); + "No column family : " + new String(column, StandardCharsets.UTF_8) + " available"); } Collection storeFiles = store.getStorefiles(); if (storeFiles == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 37ec595921..bd7b4a0385 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -904,7 +904,7 @@ public class HRegionServer extends HasThread implements */ private boolean isClusterUp() { return this.masterless || - this.clusterStatusTracker != null && this.clusterStatusTracker.isClusterUp(); + (this.clusterStatusTracker != null && this.clusterStatusTracker.isClusterUp()); } /** @@ -1745,7 +1745,7 @@ public class HRegionServer extends HasThread implements if (r.shouldFlush(whyFlush)) { FlushRequester requester = server.getFlushRequester(); if (requester != null) { - long randomDelay = RandomUtils.nextInt(0, RANGE_OF_DELAY) + MIN_DELAY_TIME; + long randomDelay = (long) RandomUtils.nextInt(0, RANGE_OF_DELAY) + MIN_DELAY_TIME; LOG.info(getName() + " requesting flush of " + r.getRegionInfo().getRegionNameAsString() + " because " + whyFlush.toString() + @@ -3111,13 +3111,13 @@ public class HRegionServer extends HasThread implements } } - final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(encodedName.getBytes(), + final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(Bytes.toBytes(encodedName), Boolean.FALSE); if (Boolean.TRUE.equals(previous)) { LOG.info("Received CLOSE for the region:" + encodedName + " , which we are already " + "trying to OPEN. Cancelling OPENING."); - if (!regionsInTransitionInRS.replace(encodedName.getBytes(), previous, Boolean.FALSE)){ + if (!regionsInTransitionInRS.replace(Bytes.toBytes(encodedName), previous, Boolean.FALSE)) { // The replace failed. That should be an exceptional case, but theoretically it can happen. // We're going to try to do a standard close then. LOG.warn("The opening for region " + encodedName + " was done before we could cancel it." + @@ -3140,7 +3140,7 @@ public class HRegionServer extends HasThread implements if (actualRegion == null) { LOG.debug("Received CLOSE for a region which is not online, and we're not opening."); - this.regionsInTransitionInRS.remove(encodedName.getBytes()); + this.regionsInTransitionInRS.remove(Bytes.toBytes(encodedName)); // The master deletes the znode when it receives this exception. 
throw new NotServingRegionException("The region " + encodedName + " is not online, and is not opening."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java index d3509c2dbd..afd85f8d78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java @@ -44,6 +44,7 @@ public class HRegionServerCommandLine extends ServerCommandLine { this.regionServerClass = clazz; } + @Override protected String getUsage() { return USAGE; } @@ -73,6 +74,7 @@ public class HRegionServerCommandLine extends ServerCommandLine { return 0; } + @Override public int run(String args[]) throws Exception { if (args.length != 1) { usage(null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 01121dd80e..f228d44dea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -42,6 +42,7 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Predicate; @@ -149,8 +150,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat volatile boolean forceMajor = false; /* how many bytes to write between status checks */ static int closeCheckInterval = 0; - private volatile long storeSize = 0L; - private volatile long totalUncompressedBytes = 0L; + private AtomicLong storeSize = new AtomicLong(); + private AtomicLong totalUncompressedBytes = new AtomicLong(); /** * RWLock for store operations. 
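
[Editor's aside, not part of the patch] The HStore hunks here, like the HMobStore ones earlier, replace volatile long counters with AtomicLong. Incrementing a volatile field is a non-atomic read-modify-write, so concurrent writers can lose updates (findbugs reports this as VO_VOLATILE_INCREMENT, previously suppressed in HMobStore); AtomicLong makes the update itself atomic while keeping reads cheap. A minimal sketch of the migration, with illustrative names:

    import java.util.concurrent.atomic.AtomicLong;

    class StoreMetrics {
      // was: private volatile long flushedCellsCount = 0;
      private final AtomicLong flushedCellsCount = new AtomicLong();

      void updateFlushedCellsCount(long count) {
        // atomic, unlike "flushedCellsCount += count" on a volatile field
        flushedCellsCount.addAndGet(count);
      }

      long getFlushedCellsCount() {
        return flushedCellsCount.get();
      }
    }

Swapping nine long fields for object references is also why the FIXED_OVERHEAD accounting further down moves from 17 to 26 ClassSize.REFERENCE entries and from 11 to 2 Bytes.SIZEOF_LONG.
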
@@ -209,13 +210,13 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat private int compactionCheckMultiplier; protected Encryption.Context cryptoContext = Encryption.Context.NONE; - private volatile long flushedCellsCount = 0; - private volatile long compactedCellsCount = 0; - private volatile long majorCompactedCellsCount = 0; - private volatile long flushedCellsSize = 0; - private volatile long flushedOutputFileSize = 0; - private volatile long compactedCellsSize = 0; - private volatile long majorCompactedCellsSize = 0; + private AtomicLong flushedCellsCount = new AtomicLong(); + private AtomicLong compactedCellsCount = new AtomicLong(); + private AtomicLong majorCompactedCellsCount = new AtomicLong(); + private AtomicLong flushedCellsSize = new AtomicLong(); + private AtomicLong flushedOutputFileSize = new AtomicLong(); + private AtomicLong compactedCellsSize = new AtomicLong(); + private AtomicLong majorCompactedCellsSize = new AtomicLong(); /** * Constructor @@ -544,8 +545,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat HStoreFile storeFile = completionService.take().get(); if (storeFile != null) { long length = storeFile.getReader().length(); - this.storeSize += length; - this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes(); + this.storeSize.addAndGet(length); + this.totalUncompressedBytes + .addAndGet(storeFile.getReader().getTotalUncompressedBytes()); LOG.debug("loaded {}", storeFile); results.add(storeFile); } @@ -844,8 +846,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat private void bulkLoadHFile(HStoreFile sf) throws IOException { StoreFileReader r = sf.getReader(); - this.storeSize += r.length(); - this.totalUncompressedBytes += r.getTotalUncompressedBytes(); + this.storeSize.addAndGet(r.length()); + this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes()); // Append the new storefile into the list this.lock.writeLock().lock(); @@ -1021,8 +1023,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat HStoreFile sf = createStoreFileAndReader(dstPath); StoreFileReader r = sf.getReader(); - this.storeSize += r.length(); - this.totalUncompressedBytes += r.getTotalUncompressedBytes(); + this.storeSize.addAndGet(r.length()); + this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes()); if (LOG.isInfoEnabled()) { LOG.info("Added " + sf + ", entries=" + r.getEntries() + @@ -1373,11 +1375,11 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat writeCompactionWalRecord(filesToCompact, sfs); replaceStoreFiles(filesToCompact, sfs); if (cr.isMajor()) { - majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs; - majorCompactedCellsSize += getCompactionProgress().totalCompactedSize; + majorCompactedCellsCount.addAndGet(getCompactionProgress().totalCompactingKVs); + majorCompactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize); } else { - compactedCellsCount += getCompactionProgress().totalCompactingKVs; - compactedCellsSize += getCompactionProgress().totalCompactedSize; + compactedCellsCount.addAndGet(getCompactionProgress().totalCompactingKVs); + compactedCellsSize.addAndGet(getCompactionProgress().totalCompactedSize); } long outputBytes = getTotalSize(sfs); @@ -1478,7 +1480,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } } message.append("total size for store is ") - 
.append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize, "", 1)) + .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1)) .append(". This selection was in queue for ") .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime())) .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime)) @@ -1772,7 +1774,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat completeCompaction(delSfs); LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in " + this + " of " + this.getRegionInfo().getRegionNameAsString() - + "; total size for store is " + TraditionalBinaryPrefix.long2String(storeSize, "", 1)); + + "; total size for store is " + + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1)); } public void cancelRequestedCompaction(CompactionContext compaction) { @@ -1826,16 +1829,16 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat @VisibleForTesting protected void completeCompaction(Collection<HStoreFile> compactedFiles) throws IOException { - this.storeSize = 0L; - this.totalUncompressedBytes = 0L; + this.storeSize.set(0L); + this.totalUncompressedBytes.set(0L); for (HStoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) { StoreFileReader r = hsf.getReader(); if (r == null) { LOG.warn("StoreFile {} has a null Reader", hsf); continue; } - this.storeSize += r.length(); - this.totalUncompressedBytes += r.getTotalUncompressedBytes(); + this.storeSize.addAndGet(r.length()); + this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes()); } } @@ -1896,7 +1899,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat @Override public long getSize() { - return storeSize; + return storeSize.get(); } public void triggerMajorCompaction() { @@ -2043,7 +2046,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat @Override public long getStoreSizeUncompressed() { - return this.totalUncompressedBytes; + return this.totalUncompressedBytes.get(); } @Override @@ -2235,9 +2238,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat committedFiles.add(sf.getPath()); } - HStore.this.flushedCellsCount += cacheFlushCount; - HStore.this.flushedCellsSize += cacheFlushSize; - HStore.this.flushedOutputFileSize += outputFileSize; + HStore.this.flushedCellsCount.addAndGet(cacheFlushCount); + HStore.this.flushedCellsSize.addAndGet(cacheFlushSize); + HStore.this.flushedOutputFileSize.addAndGet(outputFileSize); // Add new file to store files. Clear snapshot too while we have the Store write lock.
return HStore.this.updateStorefiles(storeFiles, snapshot.getId()); @@ -2270,8 +2273,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); - HStore.this.storeSize += storeFile.getReader().length(); - HStore.this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes(); + HStore.this.storeSize.addAndGet(storeFile.getReader().length()); + HStore.this.totalUncompressedBytes + .addAndGet(storeFile.getReader().getTotalUncompressedBytes()); if (LOG.isInfoEnabled()) { LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() + @@ -2315,7 +2319,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } public static final long FIXED_OVERHEAD = - ClassSize.align(ClassSize.OBJECT + (17 * ClassSize.REFERENCE) + (11 * Bytes.SIZEOF_LONG) + ClassSize.align(ClassSize.OBJECT + (26 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + (5 * Bytes.SIZEOF_INT) + (2 * Bytes.SIZEOF_BOOLEAN)); public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD @@ -2354,37 +2358,37 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat @Override public long getFlushedCellsCount() { - return flushedCellsCount; + return flushedCellsCount.get(); } @Override public long getFlushedCellsSize() { - return flushedCellsSize; + return flushedCellsSize.get(); } @Override public long getFlushedOutputFileSize() { - return flushedOutputFileSize; + return flushedOutputFileSize.get(); } @Override public long getCompactedCellsCount() { - return compactedCellsCount; + return compactedCellsCount.get(); } @Override public long getCompactedCellsSize() { - return compactedCellsSize; + return compactedCellsSize.get(); } @Override public long getMajorCompactedCellsCount() { - return majorCompactedCellsCount; + return majorCompactedCellsCount.get(); } @Override public long getMajorCompactedCellsSize() { - return majorCompactedCellsSize; + return majorCompactedCellsSize.get(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index 19a63b45d2..21446d2cf5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -95,7 +95,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS } } - return foundABigStore | force; + return foundABigStore || force; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index 1e71bc85a0..fe527581b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -67,5 +67,6 @@ public interface InternalScanner extends Closeable { * Closes the scanner and releases any resources it has allocated * @throws IOException */ + @Override void close() throws IOException; } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 779ed4978a..053ae997c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -104,6 +104,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner } } + @Override public Cell peek() { if (this.current == null) { return null; @@ -111,6 +112,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner return this.current.peek(); } + @Override public Cell next() throws IOException { if(this.current == null) { return null; @@ -182,6 +184,8 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner public KVScannerComparator(CellComparator kvComparator) { this.kvComparator = kvComparator; } + + @Override public int compare(KeyValueScanner left, KeyValueScanner right) { int comparison = compare(left.peek(), right.peek()); if (comparison != 0) { @@ -210,6 +214,7 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner } } + @Override public void close() { for (KeyValueScanner scanner : this.scannersForDelayedClose) { scanner.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index 796f7c9dd2..864cc061c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -84,6 +84,7 @@ public interface KeyValueScanner extends Shipper, Closeable { /** * Close the KeyValue scanner. 
*/ + @Override void close(); /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index d564e40642..7e735e6fcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -167,8 +167,8 @@ public class MemStoreCompactor { // Substitute the pipeline with one segment if (!isInterrupted.get()) { - if (resultSwapped = compactingMemStore.swapCompactedSegments( - versionedList, result, merge)) { + if ((resultSwapped = compactingMemStore.swapCompactedSegments( + versionedList, result, merge))) { // update compaction strategy strategy.updateStats(result); // update the wal so it can be truncated and not get too long diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index fdee404166..f7493b0896 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -713,9 +713,14 @@ class MemStoreFlusher implements FlushRequester { return -1; } + @Override + public int hashCode() { + return System.identityHashCode(this); + } + @Override public boolean equals(Object obj) { return (this == obj); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java index f43573e539..02824ba94b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreMergerSegmentsIterator.java @@ -76,6 +76,7 @@ public class MemStoreMergerSegmentsIterator extends MemStoreSegmentsIterator { return null; } + @Override public void close() { if (closed) { return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index eaaa4ae84b..09929e1b54 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -788,7 +788,8 @@ class MetricsRegionServerWrapperImpl OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge(); if (storeAvgStoreFileAge.isPresent()) { - avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles; + avgAgeNumerator = + (long) (avgAgeNumerator + storeAvgStoreFileAge.getAsDouble() * storeHFiles); } tempStorefileIndexSize += store.getStorefilesRootLevelIndexSize(); @@ -931,6 +932,7 @@ class MetricsRegionServerWrapperImpl return averageRegionSize; } + @Override public long getDataMissCount() { if (this.cacheStats == null) { return 0; diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java index 2aa1a82dc0..533a05d533 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java @@ -249,7 +249,7 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge(); if (storeAvgStoreFileAge.isPresent()) { - avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles; + avgAgeNumerator += (long) storeAvgStoreFileAge.getAsDouble() * storeHFiles; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java index 54095e000b..0c3551b547 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java @@ -74,8 +74,12 @@ public class MultiVersionConcurrencyControl { public void advanceTo(long newStartPoint) { while (true) { long seqId = this.getWritePoint(); - if (seqId >= newStartPoint) break; - if (this.tryAdvanceTo(/* newSeqId = */ newStartPoint, /* expected = */ seqId)) break; + if (seqId >= newStartPoint) { + break; + } + if (this.tryAdvanceTo(newStartPoint, seqId)) { + break; + } } } @@ -239,6 +243,7 @@ public class MultiVersionConcurrencyControl { } @VisibleForTesting + @Override public String toString() { return MoreObjects.toStringHelper(this) .add("readPoint", readPoint) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index b6c0ebe2a4..5a015818ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2807,7 +2807,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private static final long serialVersionUID = -4305297078988180130L; @Override - public Throwable fillInStackTrace() { + public synchronized Throwable fillInStackTrace() { return this; } }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java index dc1708cfbc..198666804c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java @@ -279,6 +279,7 @@ public class RegionServerCoprocessorHost extends * @return An instance of RegionServerServices, an object NOT for general user-space Coprocessor * consumption. 
*/ + @Override public RegionServerServices getRegionServerServices() { return this.regionServerServices; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java index 017e0fb709..6b2267f851 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java @@ -435,8 +435,8 @@ public class ScannerContext { TIME_LIMIT_REACHED_MID_ROW(true, true), BATCH_LIMIT_REACHED(true, true); - private boolean moreValues; - private boolean limitReached; + private final boolean moreValues; + private final boolean limitReached; private NextState(boolean moreValues, boolean limitReached) { this.moreValues = moreValues; @@ -492,13 +492,13 @@ public class ScannerContext { * limits, the checker must know their own scope (i.e. are they checking the limits between * rows, between cells, etc...) */ - int depth; + final int depth; LimitScope(int depth) { this.depth = depth; } - int depth() { + final int depth() { return depth; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index b67b54eca3..0b1d251d98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -97,7 +97,7 @@ public class ServerNonceManager { } public boolean isExpired(long minRelevantTime) { - return getActivityTime() < (minRelevantTime & (~0l >>> 3)); + return getActivityTime() < (minRelevantTime & (~0L >>> 3)); } public void setMvcc(long mvcc) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java index 4f3e0f2766..9753080bcf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java @@ -24,6 +24,7 @@ public class SteppingSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy * This allows a table to spread quickly across servers, while avoiding creating * too many regions. */ + @Override protected long getSizeToCheck(final int tableRegionsCount) { return tableRegionsCount == 1 ? 
this.initialSize : getDesiredMaxFileSize(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 89b2acd8e8..80d0ad7f26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -187,14 +187,17 @@ public class StoreFileScanner implements KeyValueScanner { return scanners; } + @Override public String toString() { return "StoreFileScanner[" + hfs.toString() + ", cur=" + cur + "]"; } + @Override public Cell peek() { return cur; } + @Override public Cell next() throws IOException { Cell retKey = cur; @@ -215,6 +218,7 @@ public class StoreFileScanner implements KeyValueScanner { return retKey; } + @Override public boolean seek(Cell key) throws IOException { if (seekCount != null) seekCount.increment(); @@ -242,6 +246,7 @@ public class StoreFileScanner implements KeyValueScanner { } } + @Override public boolean reseek(Cell key) throws IOException { if (seekCount != null) seekCount.increment(); @@ -298,6 +303,7 @@ public class StoreFileScanner implements KeyValueScanner { return true; } + @Override public void close() { if (closed) return; cur = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index 595231ff1c..59b91d5b62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -104,12 +104,8 @@ public class StripeStoreFlusher extends StoreFlusher { return new StripeMultiFileWriter.WriterFactory() { @Override public StoreFileWriter createWriter() throws IOException { - StoreFileWriter writer = store.createWriterInTmp( - kvCount, store.getColumnFamilyDescriptor().getCompressionType(), - /* isCompaction = */ false, - /* includeMVCCReadpoint = */ true, - /* includesTags = */ true, - /* shouldDropBehind = */ false); + StoreFileWriter writer = store.createWriterInTmp(kvCount, + store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false); return writer; } }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index ed4a025631..056f076e2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.regionserver.CellSink; -import org.apache.hadoop.hbase.regionserver.CustomizedScanInfoBuilder; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.InternalScanner; @@ -261,10 +260,8 @@ public abstract class Compactor<T extends CellSink> { throws IOException { // When all MVCC readpoints are 0, don't write them. // See HBASE-8166, HBASE-12600, and HBASE-13389.
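The StripeStoreFlusher hunk above, and the Compactor#createTmpWriter hunk that follows, drop the inline /* name = */ argument comments instead of keeping them. This is presumably down to error-prone's ParameterName check, which matches comments of the exact shape /* parameterName= */ against the declared parameter and flags near-miss variants such as the spaced /* isCompaction = */ form; deleting the comment or normalizing it both satisfy the check. Sketch with a made-up method (the check name and its exact behavior are stated from memory, not from this patch):

    class ParameterNameDemo {
      static void write(long kvCount, boolean isCompaction, boolean includesTags) { }

      public static void main(String[] args) {
        write(10L, /* isCompaction = */ false, /* includesTags = */ true); // near-miss form, flagged
        write(10L, /* isCompaction= */ false, /* includesTags= */ true);   // exact form, accepted
        write(10L, false, true);                                           // the patch's choice
      }
    }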
- return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, - /* isCompaction = */true, - /* includeMVCCReadpoint = */fd.maxMVCCReadpoint > 0, - /* includesTags = */fd.maxTagsLength > 0, shouldDropBehind); + return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true, + fd.maxMVCCReadpoint > 0, fd.maxTagsLength > 0, shouldDropBehind); } private ScanInfo preCompactScannerOpen(CompactionRequestImpl request, ScanType scanType, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java index 905562c985..cf04d00fd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java @@ -108,6 +108,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy { } } + @Override public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact) throws IOException { long mcTime = getNextMajorCompactTime(filesToCompact); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index a6ea9b22f4..e0be6cfafd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -209,6 +209,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { * @param filesCompacting files being scheduled to compact. * @return true to schedule a request. */ + @Override public boolean needsCompaction(Collection<HStoreFile> storeFiles, List<HStoreFile> filesCompacting) { int numCandidates = storeFiles.size() - filesCompacting.size(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java index 4f6aba9b8e..3eb830a264 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java @@ -106,6 +106,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { * @param filesToCompact Files to compact. Can be null. * @return True if we should run a major compaction.
*/ + @Override public abstract boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact) throws IOException; @@ -154,6 +155,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy { * @param compactionSize Total size of some compaction * @return whether this should be a large or small compaction */ + @Override public boolean throttleCompaction(long compactionSize) { return compactionSize > comConf.getThrottlePoint(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java index 85394fd0ff..c0f13c0ac5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java @@ -89,10 +89,12 @@ public class ExplicitColumnTracker implements ColumnTracker { /** * Done when there are no more columns to match against. */ + @Override public boolean done() { return this.index >= columns.length; } + @Override public ColumnCount getColumnHint() { return this.column; } @@ -182,6 +184,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } // Called between every row. + @Override public void reset() { this.index = 0; this.column = this.columns[this.index]; @@ -240,6 +243,7 @@ public class ExplicitColumnTracker implements ColumnTracker { } } + @Override public boolean isDone(long timestamp) { return minVersions <= 0 && isExpired(timestamp); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java index 419e93be67..f2ad1e6b87 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanWildcardColumnTracker.java @@ -180,6 +180,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { * scanner). * @return The column count.
*/ + @Override public ColumnCount getColumnHint() { return null; } @@ -205,6 +206,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker { } } + @Override public boolean isDone(long timestamp) { return minVersions <= 0 && isExpired(timestamp); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java index 0ace78280e..faf3b7747e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java @@ -346,7 +346,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> { long currentHighestProcessedAppendTxid = highestProcessedAppendTxid; highestProcessedAppendTxidAtLastSync = currentHighestProcessedAppendTxid; final long startTimeNs = System.nanoTime(); - final long epoch = epochAndState >>> 2; + final long epoch = (long) epochAndState >>> 2L; writer.sync().whenCompleteAsync((result, error) -> { if (error != null) { syncFailed(epoch, error); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 0c880f53df..101e64b1db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -625,7 +625,7 @@ public class FSHLog extends AbstractFSWAL<Writer> { } finally { rollWriterLock.unlock(); } - if (lowReplication || writer != null && writer.getLength() > logrollsize) { + if (lowReplication || (writer != null && writer.getLength() > logrollsize)) { requestLogRoll(lowReplication); } } @@ -866,7 +866,7 @@ public class FSHLog extends AbstractFSWAL<Writer> { private final SyncFuture[] syncFutures; // Had 'interesting' issues when this was non-volatile. On occasion, we'd not pass all // syncFutures to the next sync'ing thread. - private volatile int syncFuturesCount = 0; + private AtomicInteger syncFuturesCount = new AtomicInteger(); private volatile SafePointZigZagLatch zigzagLatch; /** * Set if we get an exception appending or syncing so that all subsequence appends and syncs on @@ -894,10 +894,10 @@ public class FSHLog extends AbstractFSWAL<Writer> { private void cleanupOutstandingSyncsOnException(final long sequence, final Exception e) { // There could be handler-count syncFutures outstanding. - for (int i = 0; i < this.syncFuturesCount; i++) { + for (int i = 0; i < this.syncFuturesCount.get(); i++) { this.syncFutures[i].done(sequence, e); } - this.syncFuturesCount = 0; + this.syncFuturesCount.set(0); } /** @@ -905,7 +905,7 @@ public class FSHLog extends AbstractFSWAL<Writer> { */ private boolean isOutstandingSyncs() { // Look at SyncFutures in the EventHandler - for (int i = 0; i < this.syncFuturesCount; i++) { + for (int i = 0; i < this.syncFuturesCount.get(); i++) { if (!this.syncFutures[i].isDone()) { return true; } @@ -938,9 +938,9 @@ public class FSHLog extends AbstractFSWAL<Writer> { try { if (truck.type() == RingBufferTruck.Type.SYNC) { - this.syncFutures[this.syncFuturesCount++] = truck.unloadSync(); + this.syncFutures[this.syncFuturesCount.getAndIncrement()] = truck.unloadSync(); // Force flush of syncs if we are carrying a full complement of syncFutures.
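syncFuturesCount gets the same treatment as the HStore counters, with one extra wrinkle: it is also used as an array index, and slots[i++] on a volatile int is still two independent operations, so a thread calling cleanupOutstandingSyncsOnException can observe the count before the slot write lands, and two writers could claim the same slot. getAndIncrement() hands out each index exactly once and publishes it. A compact illustration with invented names:

    import java.util.concurrent.atomic.AtomicInteger;

    class SlotBuffer {
      private final Object[] slots = new Object[16];
      private final AtomicInteger count = new AtomicInteger();

      void offer(Object o) {
        // Atomically reserves a unique index, then writes into it; with a plain
        // or volatile int, slots[count++] could give two callers the same slot.
        slots[count.getAndIncrement()] = o;
      }
    }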
- if (this.syncFuturesCount == this.syncFutures.length) { + if (this.syncFuturesCount.get() == this.syncFutures.length) { endOfBatch = true; } } else if (truck.type() == RingBufferTruck.Type.APPEND) { @@ -979,7 +979,7 @@ public class FSHLog extends AbstractFSWAL<Writer> { if (this.exception == null) { // If not a batch, return to consume more events from the ring buffer before proceeding; // we want to get up a batch of syncs and appends before we go do a filesystem sync. - if (!endOfBatch || this.syncFuturesCount <= 0) { + if (!endOfBatch || this.syncFuturesCount.get() <= 0) { return; } // syncRunnerIndex is bound to the range [0, Integer.MAX_INT - 1] as follows: @@ -997,7 +997,7 @@ public class FSHLog extends AbstractFSWAL<Writer> { // Below expects that the offer 'transfers' responsibility for the outstanding syncs to // the syncRunner. We should never get an exception in here. this.syncRunners[this.syncRunnerIndex].offer(sequence, this.syncFutures, - this.syncFuturesCount); + this.syncFuturesCount.get()); } catch (Exception e) { // Should NEVER get here. requestLogRoll(); @@ -1010,7 +1010,7 @@ public class FSHLog extends AbstractFSWAL<Writer> { ? this.exception : new DamagedWALException("On sync", this.exception)); } attainSafePoint(sequence); - this.syncFuturesCount = 0; + this.syncFuturesCount.set(0); } catch (Throwable t) { LOG.error("UNEXPECTED!!! syncFutures.length=" + this.syncFutures.length, t); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index 57eccbbded..4e88df0631 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -85,6 +85,7 @@ class FSWALEntry extends Entry { } } + @Override public String toString() { return "sequence=" + this.txid + ", " + super.toString(); }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index ebb6079b08..5d8d8c00ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -102,10 +102,12 @@ public class ProtobufLogReader extends ReaderBase { public long trailerSize() { if (trailerPresent) { // sizeof PB_WAL_COMPLETE_MAGIC + sizof trailerSize + trailer - final long calculatedSize = PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + trailer.getSerializedSize(); + final long calculatedSize = (long) PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT + + trailer.getSerializedSize(); final long expectedSize = fileLength - walEditsStopOffset; if (expectedSize != calculatedSize) { - LOG.warn("After parsing the trailer, we expect the total footer to be "+ expectedSize +" bytes, but we calculate it as being " + calculatedSize); + LOG.warn("After parsing the trailer, we expect the total footer to be {} bytes, but we " + + "calculate it as being {}", expectedSize, calculatedSize); } return expectedSize; } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java index 2093421d82..42d0299026 100644 ---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java @@ -84,7 +84,7 @@ public abstract class BaseReplicationEndpoint extends AbstractService for (String filterName : filterNames) { try { Class<?> clazz = Class.forName(filterName); - filters.add((WALEntryFilter) clazz.newInstance()); + filters.add((WALEntryFilter) clazz.getDeclaredConstructor().newInstance()); } catch (Exception e) { LOG.error("Unable to create WALEntryFilter " + filterName, e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java index b28c58fc28..8a4d331aef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DefaultSourceFSConfigurationProvider.java @@ -35,7 +35,7 @@ public class DefaultSourceFSConfigurationProvider implements SourceFSConfigurati LoggerFactory.getLogger(DefaultSourceFSConfigurationProvider.class); // Map containing all the source clusters configurations against their replication cluster id - private Map<String, Configuration> sourceClustersConfs = new HashMap<>(); + private final Map<String, Configuration> sourceClustersConfs = new HashMap<>(); private static final String XML = ".xml"; @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index dcd79a6f67..28494e67ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -105,6 +105,7 @@ public class Replication implements public Replication() { } + @Override public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir, WALFileLengthProvider walFileLengthProvider) throws IOException { this.server = server; @@ -165,12 +166,14 @@ public class Replication implements /* * Returns an object to listen to new wal changes **/ + @Override public WALActionsListener getWALActionsListener() { return this; } /** * Stops replication service.
*/ + @Override public void stopReplicationService() { join(); } @@ -199,6 +202,7 @@ public class Replication implements * @param sourceHFileArchiveDirPath Path that point to the source cluster hfile archive directory * @throws IOException */ + @Override public void replicateLogEntries(List<WALEntry> entries, CellScanner cells, String replicationClusterId, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException { @@ -211,6 +215,7 @@ public class Replication implements * it starts * @throws IOException */ + @Override public void startReplicationService() throws IOException { try { this.replicationManager.init(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 57e185a7a2..902971ede8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -110,7 +110,7 @@ public class ReplicationSink { try { @SuppressWarnings("rawtypes") Class c = Class.forName(className); - this.provider = (SourceFSConfigurationProvider) c.newInstance(); + this.provider = (SourceFSConfigurationProvider) c.getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new IllegalArgumentException("Configured source fs configuration provider class " + className + " throws error.", e); @@ -123,7 +123,7 @@ public class ReplicationSink { WALEntrySinkFilter filter = null; try { filter = walEntryFilterClass == null? null: - (WALEntrySinkFilter)walEntryFilterClass.newInstance(); + (WALEntrySinkFilter)walEntryFilterClass.getDeclaredConstructor().newInstance(); } catch (Exception e) { LOG.warn("Failed to instantiate " + walEntryFilterClass); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 16fb4a7a6d..9db3cc2fef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -40,11 +40,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.replication.ChainWALEntryFilter; import org.apache.hadoop.hbase.replication.ClusterMarkingEntryFilter; @@ -61,8 +57,11 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** * Class that handles the source of a replication stream.
@@ -225,7 +224,8 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf // A peerId will not have "-" in its name, see HBASE-11394 peerId = peerClusterZnode.split("-")[0]; } - Map<TableName, List<String>> tableCFMap = replicationPeers.getConnectedPeer(peerId).getTableCFs(); + Map<TableName, List<String>> tableCFMap = + replicationPeers.getConnectedPeer(peerId).getTableCFs(); if (tableCFMap != null) { List<String> tableCfs = tableCFMap.get(tableName); if (tableCFMap.containsKey(tableName) @@ -470,7 +470,8 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf } if (this.replicationEndpoint != null) { try { - this.replicationEndpoint.awaitTerminated(sleepForRetries * maxRetriesMultiplier, TimeUnit.MILLISECONDS); + this.replicationEndpoint + .awaitTerminated(sleepForRetries * maxRetriesMultiplier, TimeUnit.MILLISECONDS); } catch (TimeoutException te) { LOG.warn("Got exception while waiting for endpoint to shutdown for replication source :" + this.peerClusterZnode, @@ -494,7 +495,9 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf public Path getCurrentPath() { // only for testing for (ReplicationSourceShipper worker : workerThreads.values()) { - if (worker.getCurrentPath() != null) return worker.getCurrentPath(); + if (worker.getCurrentPath() != null) { + return worker.getCurrentPath(); + } } return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 23ae704e5b..55ebdc1394 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -110,7 +110,7 @@ public class ReplicationSourceManager implements ReplicationListener { private final Configuration conf; private final FileSystem fs; // The paths to the latest log of each wal group, for new coming peers - private Set<Path> latestPaths; + private final Set<Path> latestPaths; // Path to the wals directories private final Path logDir; // Path to the wal archive diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java index 4643a226f9..e56fab232b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java @@ -355,7 +355,8 @@ public class ReplicationSourceWALReader extends Thread { List<StoreDescriptor> stores = bld.getStoresList(); int totalStores = stores.size(); for (int j = 0; j < totalStores; j++) { - totalStoreFilesSize += stores.get(j).getStoreFileSizeBytes(); + totalStoreFilesSize = + (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes()); } } catch (IOException e) { LOG.error("Failed to deserialize bulk load entry from wal edit. 
" diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index bfc415c3c4..7e444cb363 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -154,6 +154,7 @@ class AccessControlFilter extends FilterBase { /** * @return The filter serialized using pb */ + @Override public byte [] toByteArray() { // no implementation, server-side use only throw new UnsupportedOperationException( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 5a3c883388..f191c9d1d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -2186,8 +2186,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, // Also using acl as table name to be inline with the results of global admin and will // help in avoiding any leakage of information about being superusers. for (String user: Superusers.getSuperUsers()) { - perms.add(new UserPermission(user.getBytes(), AccessControlLists.ACL_TABLE_NAME, null, - Action.values())); + perms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME, + null, Action.values())); } } response = AccessControlUtil.buildGetUserPermissionsResponse(perms); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java index 1949b98a96..cecca41c03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java @@ -207,6 +207,7 @@ public class AuthResult { return sb.toString(); } + @Override public String toString() { return "AuthResult" + toContextString(); } @@ -279,6 +280,7 @@ public class AuthResult { return this; } + @Override public String toString() { String familiesString = toFamiliesString(families, family, qualifier); String[] params = new String[] { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index de8ea5d3ab..59b91a0e4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -330,6 +330,7 @@ public class AuthenticationTokenSecretManager interrupt(); } + @Override public void run() { zkLeader.start(); zkLeader.waitToBecomeLeader(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java index 0bc74b18dc..c2dd046474 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ExpressionParser.java @@ -17,6 +17,7 @@ */ package 
org.apache.hadoop.hbase.security.visibility; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Stack; @@ -103,7 +104,8 @@ public class ExpressionParser { } index++; } while (index < endPos && !isEndOfLabel(exp[index])); - leafExp = new String(exp, labelOffset, index - labelOffset).trim(); + leafExp = + new String(exp, labelOffset, index - labelOffset, StandardCharsets.UTF_8).trim(); if (leafExp.isEmpty()) { throw new ParseException("Error parsing expression " + expS + " at column : " + index); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 1ba6029162..6e00f40195 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -36,6 +36,7 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import org.apache.hadoop.conf.Configuration; @@ -753,8 +754,9 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso logResult(true, "addLabels", "Adding labels allowed", null, labels, null); int i = 0; for (OperationStatus status : opStatus) { - while (response.getResult(i) != successResult) + while (!Objects.equals(response.getResult(i), successResult)) { i++; + } if (status.getOperationStatusCode() != SUCCESS) { RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder(); failureResultBuilder.setException(buildException(new DoNotRetryIOException( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java index f6ed72ff74..f3e4853592 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java @@ -149,7 +149,7 @@ public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTrack List<Tag> putVisTags = new ArrayList<>(); Byte putCellVisTagsFormat = VisibilityUtils.extractVisibilityTags(put, putVisTags); return putVisTags.isEmpty() == delInfo.tags.isEmpty() && ( - putVisTags.isEmpty() && delInfo.tags.isEmpty() || VisibilityLabelServiceManager + (putVisTags.isEmpty() && delInfo.tags.isEmpty()) || VisibilityLabelServiceManager .getInstance().getVisibilityLabelService() .matchVisibility(putVisTags, putCellVisTagsFormat, delInfo.tags, delInfo.format)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java index 78b5037cbe..fd479b4059 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/LeafExpressionNode.java @@ -58,6 +58,7 @@ public class LeafExpressionNode implements ExpressionNode { return true; } + @Override public LeafExpressionNode deepClone() { LeafExpressionNode clone = new
LeafExpressionNode(this.identifier); return clone; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java index 77b34e9ad2..83610fadc8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/NonLeafExpressionNode.java @@ -91,6 +91,7 @@ public class NonLeafExpressionNode implements ExpressionNode { return this.op == Operator.NOT; } + @Override public NonLeafExpressionNode deepClone() { NonLeafExpressionNode clone = new NonLeafExpressionNode(this.op); for (ExpressionNode exp : this.childExps) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java index fdec5ac50d..f7ffe464cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/expression/Operator.java @@ -23,12 +23,13 @@ import org.apache.yetus.audience.InterfaceAudience; public enum Operator { AND('&'), OR('|'), NOT('!'); - private char rep; + private final char rep; private Operator(char rep) { this.rep = rep; } + @Override public String toString() { return String.valueOf(this.rep); }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 7d7e526b6d..befbef4447 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -311,7 +311,7 @@ public final class SnapshotInfo extends AbstractHBaseTool { !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)) { nonSharedHfilesArchiveSize.addAndGet(size); } - } else if (inArchive = fs.exists(link.getMobPath())) { + } else if ((inArchive = fs.exists(link.getMobPath()))) { size = fs.getFileStatus(link.getMobPath()).getLen(); hfilesMobSize.addAndGet(size); hfilesMobCount.incrementAndGet(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index a5468ee500..1d7f4f617a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -75,23 +75,28 @@ public final class SnapshotManifestV1 { this.fs = fs; } + @Override public HRegionFileSystem regionOpen(final RegionInfo regionInfo) throws IOException { HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, snapshotDir, regionInfo); return snapshotRegionFs; } + @Override public void regionClose(final HRegionFileSystem region) { } + @Override public Path familyOpen(final HRegionFileSystem snapshotRegionFs, final byte[] familyName) { Path familyDir = snapshotRegionFs.getStoreDir(Bytes.toString(familyName)); return familyDir; } + @Override public void familyClose(final HRegionFileSystem region, final Path family) { } + @Override public void storeFile(final HRegionFileSystem region, final Path familyDir, final 
StoreFileInfo storeFile) throws IOException { Path referenceFile = new Path(familyDir, storeFile.getPath().getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index 4d35f0b896..4e60d67894 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -77,12 +77,14 @@ public final class SnapshotManifestV2 { this.fs = fs; } + @Override public SnapshotRegionManifest.Builder regionOpen(final RegionInfo regionInfo) { SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); manifest.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo)); return manifest; } + @Override public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException { // we should ensure the snapshot dir exist, maybe it has been deleted by master // see HBASE-16464 @@ -99,6 +101,7 @@ public final class SnapshotManifestV2 { } } + @Override public SnapshotRegionManifest.FamilyFiles.Builder familyOpen( final SnapshotRegionManifest.Builder region, final byte[] familyName) { SnapshotRegionManifest.FamilyFiles.Builder family = @@ -107,11 +110,13 @@ public final class SnapshotManifestV2 { return family; } + @Override public void familyClose(final SnapshotRegionManifest.Builder region, final SnapshotRegionManifest.FamilyFiles.Builder family) { region.addFamilyFiles(family.build()); } + @Override public void storeFile(final SnapshotRegionManifest.Builder region, final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java index c14c944f5b..3cff04749d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java @@ -971,7 +971,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { continue; } Path familyDir = familyStat.getPath(); - byte[] familyName = familyDir.getName().getBytes(); + byte[] familyName = Bytes.toBytes(familyDir.getName()); // Skip invalid family try { ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(familyName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java index 995057059d..efad97ef38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedPriorityBlockingQueue.java @@ -155,6 +155,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements this.queue = new PriorityQueue<>(capacity, comparator); } + @Override public boolean offer(E e) { if (e == null) throw new NullPointerException(); @@ -171,6 +172,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements return false; } + @Override public void put(E e) throws InterruptedException { if (e == null) throw new NullPointerException(); @@ -186,6 +188,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements } } + @Override public boolean offer(E e, long timeout, TimeUnit
unit) throws InterruptedException { if (e == null) throw new NullPointerException(); @@ -206,6 +209,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements return true; } + @Override public E take() throws InterruptedException { E result = null; lock.lockInterruptibly(); @@ -221,6 +225,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements return result; } + @Override public E poll() { E result = null; lock.lock(); @@ -235,6 +240,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements return result; } + @Override public E poll(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); @@ -254,6 +260,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements return result; } + @Override public E peek() { lock.lock(); try { @@ -263,6 +270,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements } } + @Override public int size() { lock.lock(); try { @@ -272,6 +280,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements } } + @Override public Iterator<E> iterator() { throw new UnsupportedOperationException(); } @@ -280,6 +289,7 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements return queue.comparator(); } + @Override public int remainingCapacity() { lock.lock(); try { @@ -289,10 +299,12 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements } } + @Override public boolean remove(Object o) { throw new UnsupportedOperationException(); } + @Override public boolean contains(Object o) { lock.lock(); try { @@ -302,10 +314,12 @@ public class BoundedPriorityBlockingQueue<E> extends AbstractQueue<E> implements } } + @Override public int drainTo(Collection<? super E> c) { return drainTo(c, Integer.MAX_VALUE); } + @Override public int drainTo(Collection<? super E> c, int maxElements) { if (c == null) throw new NullPointerException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java index 4207f391db..2cf3bb9d19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSMapRUtils.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.permission.FsPermission; public class FSMapRUtils extends FSUtils { private static final Logger LOG = LoggerFactory.getLogger(FSMapRUtils.class); + @Override public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter) throws IOException { LOG.info("Recovering file " + p.toString() + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java index f258e6cd93..04a3384687 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -52,7 +52,7 @@ class FSRegionScanner implements Runnable { /** * Maps each region to the RS with highest locality for that region.
*/ - private Map<String, String> regionToBestLocalityRSMapping; + private final Map<String, String> regionToBestLocalityRSMapping; /** * Maps region encoded names to maps of hostnames to fractional locality of diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index c76cd902cd..c3f3bd8a18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -479,7 +479,7 @@ public class FSTableDescriptors implements TableDescriptors { // Clean away old versions for (FileStatus file : status) { Path path = file.getPath(); - if (file != mostCurrent) { + if (!file.equals(mostCurrent)) { if (!fs.delete(file.getPath(), false)) { LOG.warn("Failed cleanup of " + path); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index d0276c06b5..3ee13c4782 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -3800,7 +3800,7 @@ public class HBaseFsck extends Configured implements Closeable { @Override public int hashCode() { int hash = Arrays.hashCode(getRegionName()); - hash ^= getRegionId(); + hash = (int) (hash ^ getRegionId()); hash ^= Arrays.hashCode(getStartKey()); hash ^= Arrays.hashCode(getEndKey()); hash ^= Boolean.valueOf(isOffline()).hashCode(); @@ -3808,7 +3808,7 @@ public class HBaseFsck extends Configured implements Closeable { if (regionServer != null) { hash ^= regionServer.hashCode(); } - hash ^= modTime; + hash = (int) (hash ^ modTime); return hash; } } @@ -4054,7 +4054,7 @@ public class HBaseFsck extends Configured implements Closeable { return -1; } // both l.hdfsEntry and r.hdfsEntry must not be null.
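
The comparator fix that follows deserves a note: the old code compared region ids with (int) (a - b), and a long subtraction cast to int can overflow and flip sign, producing an inconsistent ordering. A small runnable illustration (hypothetical demo class, values chosen to trigger the overflow):

  public class CompareDemo {
    public static void main(String[] args) {
      long a = Integer.MAX_VALUE + 1L; // 2147483648
      long b = 0L;
      System.out.println((int) (a - b));      // -2147483648: claims a < b
      System.out.println(Long.compare(a, b)); // 1: a > b, as intended
    }
  }

Long.compare does a plain three-way comparison with no arithmetic, so it is correct over the entire long range.
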
- return (int) (l.hdfsEntry.hri.getRegionId()- r.hdfsEntry.hri.getRegionId()); + return Long.compare(l.hdfsEntry.hri.getRegionId(), r.hdfsEntry.hri.getRegionId()); } }; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java index e06805c15e..eba9acdd04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java @@ -52,6 +52,7 @@ public class IdLock { this.id = id; } + @Override public String toString() { return "id=" + id + ", numWaiter=" + numWaiters + ", isLocked=" + locked; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 7b9cbb651f..75b8ccd1d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -685,7 +685,7 @@ public class RegionSplitter { } } try { - return splitClass.asSubclass(SplitAlgorithm.class).newInstance(); + return splitClass.asSubclass(SplitAlgorithm.class).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new IOException("Problem loading split algorithm: ", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java index 2819b826be..f1c9ad3424 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RowBloomContext.java @@ -38,6 +38,7 @@ public class RowBloomContext extends BloomContext { super(bloomFilterWriter, comparator); } + @Override public void addLastBloomKey(Writer writer) throws IOException { if (this.getLastCell() != null) { byte[] key = CellUtil.copyRow(this.getLastCell()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java index a897f400f5..d3da773b62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java @@ -52,10 +52,12 @@ abstract public class ShutdownHookManager { private static class ShutdownHookManagerV1 extends ShutdownHookManager { // priority is ignored in hadoop versions earlier than 2.0 - public void addShutdownHook(Thread shutdownHookThread, int priority) { + @Override + public void addShutdownHook(Thread shutdownHookThread, int priority) { Runtime.getRuntime().addShutdownHook(shutdownHookThread); } + @Override public boolean removeShutdownHook(Runnable shutdownHook) { Thread shutdownHookThread = null; if (!(shutdownHook instanceof Thread)) { @@ -67,6 +69,7 @@ abstract public class ShutdownHookManager { }; private static class ShutdownHookManagerV2 extends ShutdownHookManager { + @Override public void addShutdownHook(Thread shutdownHookThread, int priority) { try { Methods.call(shutdownHookManagerClass, @@ -79,6 +82,7 @@ abstract public class ShutdownHookManager { } } + @Override public boolean removeShutdownHook(Runnable shutdownHook) { try { return (Boolean) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index 
74d502e077..d9badfa998 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Pattern; @@ -455,7 +456,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen } catch (FileNotFoundException fnfe) { // If the log was archived, continue reading from there Path archivedLog = AbstractFSWALProvider.getArchivedLogPath(path, conf); - if (path != archivedLog) { + if (!Objects.equals(path, archivedLog)) { return openReader(archivedLog, conf); } else { throw fnfe; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 725f9ff3ec..2105490765 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -185,7 +185,7 @@ class DisabledWALProvider implements WALProvider { public void sync() { if (!this.listeners.isEmpty()) { for (WALActionsListener listener : this.listeners) { - listener.postSync(0l, 0); + listener.postSync(0L, 0); } } } @@ -195,6 +195,7 @@ class DisabledWALProvider implements WALProvider { sync(); } + @Override public Long startCacheFlush(final byte[] encodedRegionName, Map<byte[], Long> flushedFamilyNamesToSeq) { return startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq.keySet()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index 14505a8a9c..f1662bc516 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -63,7 +63,7 @@ public class FSHLogProvider extends AbstractFSWALProvider<FSHLog> { ProtobufLogWriter.class, Writer.class); Writer writer = null; try { - writer = logWriterClass.newInstance(); + writer = logWriterClass.getDeclaredConstructor().newInstance(); writer.init(fs, path, conf, overwritable); return writer; } catch (Exception e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index a3e54a5a43..a0ef81762c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -104,19 +104,14 @@ public class RegionGroupingProvider implements WALProvider { } LOG.info("Instantiating RegionGroupingStrategy of type " + clazz); try { - final RegionGroupingStrategy result = clazz.newInstance(); + final RegionGroupingStrategy result = clazz.getDeclaredConstructor().newInstance(); result.init(conf, providerId); return result; - } catch (InstantiationException exception) { + } catch (Exception e) { LOG.error("couldn't set up region grouping strategy, check config key " + REGION_GROUPING_STRATEGY); - LOG.debug("Exception details for failure to load region grouping strategy.", exception); - throw new IOException("couldn't set up region grouping strategy", exception); - } catch (IllegalAccessException exception) { -
LOG.error("couldn't set up region grouping strategy, check config key " + - REGION_GROUPING_STRATEGY); - LOG.debug("Exception details for failure to load region grouping strategy.", exception); - throw new IOException("couldn't set up region grouping strategy", exception); + LOG.debug("Exception details for failure to load region grouping strategy.", e); + throw new IOException("couldn't set up region grouping strategy", e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 4008bb08c8..d478e4f332 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -99,6 +99,7 @@ public interface WAL extends Closeable, WALFileLengthProvider { * underlying resources after this call; i.e. filesystem based WALs can archive or * delete files. */ + @Override void close() throws IOException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index d70b8cdbb4..d59c824936 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -75,7 +75,7 @@ public class WALFactory implements WALFileLengthProvider { multiwal(RegionGroupingProvider.class), asyncfs(AsyncFSWALProvider.class); - Class clazz; + final Class clazz; Providers(Class clazz) { this.clazz = clazz; } @@ -139,17 +139,13 @@ public class WALFactory implements WALFileLengthProvider { List listeners, String providerId) throws IOException { LOG.info("Instantiating WALProvider of type " + clazz); try { - final WALProvider result = clazz.newInstance(); + final WALProvider result = clazz.getDeclaredConstructor().newInstance(); result.init(this, conf, listeners, providerId); return result; - } catch (InstantiationException exception) { - LOG.error("couldn't set up WALProvider, the configured class is " + clazz); - LOG.debug("Exception details for failure to load WALProvider.", exception); - throw new IOException("couldn't set up WALProvider", exception); - } catch (IllegalAccessException exception) { + } catch (Exception e) { LOG.error("couldn't set up WALProvider, the configured class is " + clazz); - LOG.debug("Exception details for failure to load WALProvider.", exception); - throw new IOException("couldn't set up WALProvider", exception); + LOG.debug("Exception details for failure to load WALProvider.", e); + throw new IOException("couldn't set up WALProvider", e); } } @@ -294,7 +290,7 @@ public class WALFactory implements WALFileLengthProvider { AbstractFSWALProvider.Reader reader = null; while (true) { try { - reader = lrClass.newInstance(); + reader = lrClass.getDeclaredConstructor().newInstance(); reader.init(fs, path, conf, null); return reader; } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java index 983fae97ac..0a5acdad6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKeyImpl.java @@ -470,8 +470,8 @@ public class WALKeyImpl implements WALKey { @Override public int hashCode() { int result = Bytes.hashCode(this.encodedRegionName); - result ^= getSequenceId(); - result ^= this.writeTime; + result = (int) (result ^ 
getSequenceId()); + result = (int) (result ^ this.writeTime); return result; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 22f7e1a900..97f80e0244 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -1084,7 +1084,7 @@ public class WALSplitter { protected EntryBuffers entryBuffers; protected ConcurrentHashMap writers = new ConcurrentHashMap<>(); - protected ConcurrentHashMap regionMaximumEditLogSeqNum = + protected final ConcurrentHashMap regionMaximumEditLogSeqNum = new ConcurrentHashMap<>(); @@ -1645,8 +1645,10 @@ public class WALSplitter { List thrown, List paths) throws InterruptedException, ExecutionException { for (final Map.Entry buffer : entryBuffers.buffers.entrySet()) { - LOG.info("Submitting writeThenClose of " + buffer.getValue().encodedRegionName); + LOG.info("Submitting writeThenClose of {}", + Arrays.toString(buffer.getValue().encodedRegionName)); completionService.submit(new Callable() { + @Override public Void call() throws Exception { Path dst = writeThenClose(buffer.getValue()); paths.add(dst); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java index aec4bbda5f..44d3e87151 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java @@ -151,6 +151,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { table = connection.getTable(TABLE_NAME); } + @Override public void doAnAction() throws Exception { // Pick a random row to write into byte[] targetRow = targetRows[rand.nextInt(targetRows.length)]; @@ -197,6 +198,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { table = connection.getTable(TABLE_NAME); } + @Override public void doAnAction() throws Exception { Get g = new Get(targetRow); Result res = table.get(g); @@ -264,6 +266,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { table = connection.getTable(TABLE_NAME); } + @Override public void doAnAction() throws Exception { Scan s = new Scan(); for (byte[] family : targetFamilies) { @@ -344,6 +347,7 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool { } // Add a flusher ctx.addThread(new RepeatingTestThread(ctx) { + @Override public void doAnAction() throws Exception { try { admin.flush(TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 92581b8fb6..5f2ffb2f33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.NavigableMap; import org.apache.hadoop.conf.Configuration; @@ -383,7 +384,7 @@ public abstract class HBaseTestCase extends TestCase { if (res_value != null) { assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) + " at timestamp " + - timestamp, value, new String(res_value)); + timestamp, value, new String(res_value, StandardCharsets.UTF_8)); } } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index a686e3306a..75abd5e219 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -33,6 +33,7 @@ import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; @@ -1596,7 +1597,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { if (status.getSecond() != 0) { LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond() + " regions updated."); - Thread.sleep(1 * 1000l); + Thread.sleep(1 * 1000L); } else { LOG.debug("All regions updated."); break; @@ -1983,7 +1984,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { expectedCount = 1; } if (count != expectedCount) { - String row = new String(new byte[] {b1,b2,b3}); + String row = new String(new byte[] {b1,b2,b3}, StandardCharsets.UTF_8); throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " + "instead of " + expectedCount); } @@ -2079,7 +2080,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { get.setConsistency(Consistency.TIMELINE); Result result = table.get(get); assertTrue(failMsg, result.containsColumn(f, null)); - assertEquals(failMsg, result.getColumnCells(f, null).size(), 1); + assertEquals(failMsg, 1, result.getColumnCells(f, null).size()); Cell cell = result.getColumnLatestCell(f, null); assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(), @@ -2114,7 +2115,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { if (!present) continue; assertTrue(failMsg, result.containsColumn(f, null)); - assertEquals(failMsg, result.getColumnCells(f, null).size(), 1); + assertEquals(failMsg, 1, result.getColumnCells(f, null).size()); Cell cell = result.getColumnLatestCell(f, null); assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 378f6ecc72..9959e318a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -165,7 +165,8 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public void run() { try { - this.user.runAs(new PrivilegedAction(){ + this.user.runAs(new PrivilegedAction() { + @Override public Object run() { runRegionServer(); return null; @@ -195,6 +196,7 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public void abort(final String reason, final Throwable cause) { this.user.runAs(new PrivilegedAction() { + @Override public Object run() { abortRegionServer(reason, cause); return null; @@ -497,6 +499,7 @@ public class MiniHBaseCluster extends HBaseCluster { * Returns the current active master, if available. * @return the active HMaster, null if none is active. 
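
The assertEquals rewrites in these test hunks all restore JUnit's parameter order, assertEquals([message,] expected, actual). A swapped call passes and fails in exactly the same cases, but its failure message blames the wrong side. A sketch assuming JUnit 4's org.junit.Assert (the message and value here are hypothetical):

  import static org.junit.Assert.assertEquals;

  String failMsg = "cell count mismatch";
  int actualCount = 3;
  // Swapped operands would report "expected:<3> but was:<1>" - misleading.
  // assertEquals(failMsg, actualCount, 1);
  // Correct order reports "expected:<1> but was:<3>".
  assertEquals(failMsg, 1, actualCount);
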
*/ + @Override public MasterService.BlockingInterface getMasterAdminService() { return this.hbaseCluster.getActiveMaster().getMasterRpcServices(); } @@ -588,6 +591,7 @@ public class MiniHBaseCluster extends HBaseCluster { * masters left. * @throws InterruptedException */ + @Override public boolean waitForActiveAndReadyMaster(long timeout) throws IOException { List mts; long start = System.currentTimeMillis(); @@ -628,6 +632,7 @@ public class MiniHBaseCluster extends HBaseCluster { /** * Shut down the mini HBase cluster */ + @Override public void shutdown() throws IOException { if (this.hbaseCluster != null) { this.hbaseCluster.shutdown(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index 1d8de45e6d..86ac2f8265 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -119,6 +119,7 @@ public abstract class MultithreadedTestUtil { this.ctx = ctx; } + @Override public void run() { try { doWork(); @@ -143,6 +144,7 @@ public abstract class MultithreadedTestUtil { super(ctx); } + @Override public final void doWork() throws Exception { try { while (ctx.shouldRun() && !stopped) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java index 06cfdcffef..f21d79d165 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java @@ -49,6 +49,7 @@ public class TestHDFSBlocksDistribution { } public class MockHDFSBlocksDistribution extends HDFSBlocksDistribution { + @Override public Map getHostAndWeights() { HashMap map = new HashMap<>(); map.put("test", new HostAndWeight(null, 100)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index c5cda27e17..12103618e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -93,7 +94,7 @@ public class TestIOFencing { } public abstract static class CompactionBlockerRegion extends HRegion { - volatile int compactCount = 0; + AtomicInteger compactCount = new AtomicInteger(); volatile CountDownLatch compactionsBlocked = new CountDownLatch(0); volatile CountDownLatch compactionsWaiting = new CountDownLatch(0); @@ -129,7 +130,7 @@ public class TestIOFencing { try { return super.compact(compaction, store, throughputController); } finally { - compactCount++; + compactCount.getAndIncrement(); } } @@ -139,7 +140,7 @@ public class TestIOFencing { try { return super.compact(compaction, store, throughputController, user); } finally { - compactCount++; + compactCount.getAndIncrement(); } } @@ -336,7 +337,7 @@ public class TestIOFencing { } LOG.info("Allowing compaction to proceed"); compactingRegion.allowCompactions(); - while (compactingRegion.compactCount == 0) { + while (compactingRegion.compactCount.get() 
== 0) { Thread.sleep(1000); } // The server we killed stays up until the compaction that was started before it was killed @@ -349,7 +350,7 @@ public class TestIOFencing { FIRST_BATCH_COUNT + SECOND_BATCH_COUNT); admin.majorCompact(TABLE_NAME); startWaitTime = System.currentTimeMillis(); - while (newRegion.compactCount == 0) { + while (newRegion.compactCount.get() == 0) { Thread.sleep(1000); assertTrue("New region never compacted", System.currentTimeMillis() - startWaitTime < 180000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index 7b6c5a5642..620abefe97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -163,6 +163,7 @@ public class TestMetaTableAccessorNoCluster { .thenThrow(new ServiceException("Server not running (2 of 3)")) .thenThrow(new ServiceException("Server not running (3 of 3)")) .thenAnswer(new Answer() { + @Override public ScanResponse answer(InvocationOnMock invocation) throws Throwable { ((HBaseRpcController) invocation.getArgument(0)).setCellScanner(CellUtil .createCellScanner(cellScannables)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java index 71492b1c90..63d2cc2b48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java @@ -127,14 +127,14 @@ public class TestMetaTableLocator { assertEquals(state, MetaTableLocator.getMetaRegionState(this.watcher).getState()); } MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN); - assertEquals(mtl.getMetaRegionLocation(this.watcher), SN); + assertEquals(SN, mtl.getMetaRegionLocation(this.watcher)); assertEquals(RegionState.State.OPEN, MetaTableLocator.getMetaRegionState(this.watcher).getState()); mtl.deleteMetaLocation(this.watcher); assertNull(MetaTableLocator.getMetaRegionState(this.watcher).getServerName()); - assertEquals(MetaTableLocator.getMetaRegionState(this.watcher).getState(), - RegionState.State.OFFLINE); + assertEquals(RegionState.State.OFFLINE, + MetaTableLocator.getMetaRegionState(this.watcher).getState()); assertNull(mtl.getMetaRegionLocation(this.watcher)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java index bdb74a4b19..24a8830474 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java @@ -48,6 +48,7 @@ import java.io.IOException; super(conf); } + @Override protected int movedRegionCleanerPeriod() { return 500; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java index 1a0215e5a9..acf7861056 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java @@ -101,6 +101,7 @@ public class TestMultiVersions { // TODO: Remove these deprecated classes or pull them in here if this is // only test using them. 
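
The TestIOFencing change above, replacing a volatile int compactCount with an AtomicInteger, fixes a genuine race rather than a style nit: count++ on a volatile field is still three steps (read, add, write), so two threads can interleave and lose an increment; volatile guarantees visibility, not atomicity. A minimal sketch of the two idioms (hypothetical class, not HBase code):

  import java.util.concurrent.atomic.AtomicInteger;

  class Counter {
    volatile int unsafe;                            // unsafe++ can lose updates
    final AtomicInteger safe = new AtomicInteger();

    void bump() {
      unsafe++;               // racy read-modify-write under contention
      safe.getAndIncrement(); // one atomic operation, no lost updates
    }
  }
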
TimestampTestBase.doTestDelete(table, new FlushCache() { + @Override public void flushcache() throws IOException { UTIL.getHBaseCluster().flushcache(); } @@ -109,6 +110,7 @@ public class TestMultiVersions { // Perhaps drop and readd the table between tests so the former does // not pollute this latter? Or put into separate tests. TimestampTestBase.doTestTimestampScanning(table, new FlushCache() { + @Override public void flushcache() throws IOException { UTIL.getMiniHBaseCluster().flushcache(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java index c1b5dac051..cfc5c2f1ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java @@ -94,9 +94,8 @@ public class TestServerName { ServerName.valueOf("www.example.org", 1234, 5678).toString()); assertEquals(sn.toString(), ServerName.valueOf("www.example.org:1234", 5678).toString()); - assertEquals(sn.toString(), - "www.example.org" + ServerName.SERVERNAME_SEPARATOR + "1234" + - ServerName.SERVERNAME_SEPARATOR + "5678"); + assertEquals("www.example.org" + ServerName.SERVERNAME_SEPARATOR + "1234" + + ServerName.SERVERNAME_SEPARATOR + "5678", sn.toString()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java index 26398210e7..4aeedb9d68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java @@ -205,7 +205,8 @@ public class TestServerSideScanMetricsFromClientSide { } // The filter should filter out all rows, but we still expect to see every row. - Filter filter = new RowFilter(CompareOperator.EQUAL, new BinaryComparator("xyz".getBytes())); + Filter filter = + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("xyz"))); scan = new Scan(baseScan); scan.setFilter(filter); testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME, ROWS.length); @@ -255,7 +256,8 @@ public class TestServerSideScanMetricsFromClientSide { testRowsFilteredMetric(baseScan, null, 0); // Row filter doesn't match any row key. All rows should be filtered - Filter filter = new RowFilter(CompareOperator.EQUAL, new BinaryComparator("xyz".getBytes())); + Filter filter = + new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("xyz"))); testRowsFilteredMetric(baseScan, filter, ROWS.length); // Filter will return results containing only the first key. Number of entire rows filtered @@ -269,7 +271,7 @@ public class TestServerSideScanMetricsFromClientSide { testRowsFilteredMetric(baseScan, filter, 0); // Column prefix will NOT find any matching qualifier on any row. All rows should be filtered - filter = new ColumnPrefixFilter("xyz".getBytes()); + filter = new ColumnPrefixFilter(Bytes.toBytes("xyz")); testRowsFilteredMetric(baseScan, filter, ROWS.length); // Matching column value should exist in each row. No rows should be filtered. 
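
A substitution that recurs through these test hunks is Bytes.toBytes("...") in place of "...".getBytes(). The zero-argument String.getBytes() encodes with the JVM's platform default charset (findbugs flags this as DM_DEFAULT_ENCODING), so the bytes handed to a BinaryComparator or ColumnPrefixFilter could differ from machine to machine; HBase's Bytes.toBytes always encodes UTF-8. The three spellings side by side:

  import java.nio.charset.StandardCharsets;
  import org.apache.hadoop.hbase.util.Bytes;

  byte[] implicit = "xyz".getBytes();                       // platform-dependent
  byte[] explicit = "xyz".getBytes(StandardCharsets.UTF_8); // deterministic
  byte[] viaHBase = Bytes.toBytes("xyz");                   // UTF-8 internally

For an ASCII literal like "xyz" the results coincide on common platforms, which is exactly why this class of bug tends to survive until someone runs with an unusual default charset.
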
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index fba429a95a..f81a36d599 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -421,7 +421,7 @@ public class TestAdmin1 { this.admin.createTable(htd); Table table = TEST_UTIL.getConnection().getTable(htd.getTableName()); TableDescriptor confirmedHtd = table.getDescriptor(); - assertEquals(TableDescriptor.COMPARATOR.compare(htd, confirmedHtd), 0); + assertEquals(0, TableDescriptor.COMPARATOR.compare(htd, confirmedHtd)); MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection()); table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 05b8edccb1..57bd158448 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -303,7 +303,7 @@ public class TestAdmin2 { TableName tableName = TableName .valueOf("testTableNotFoundExceptionWithoutAnyTables"); Table ht = TEST_UTIL.getConnection().getTable(tableName); - ht.get(new Get("e".getBytes())); + ht.get(new Get(Bytes.toBytes("e"))); } @Test (timeout=300000) @@ -582,8 +582,9 @@ public class TestAdmin2 { } // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table // actually getting disabled by the disableTable() call. - HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName().getBytes())); - HColumnDescriptor hcd = new HColumnDescriptor("cf1".getBytes()); + HTableDescriptor htd = + new HTableDescriptor(TableName.valueOf(Bytes.toBytes(name.getMethodName()))); + HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf1")); htd.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(htd); } @@ -695,13 +696,13 @@ public class TestAdmin2 { assertTrue(decommissionedRegionServers.isEmpty()); final TableName tableName = TableName.valueOf(name.getMethodName()); - TEST_UTIL.createMultiRegionTable(tableName, "f".getBytes(), 6); + TEST_UTIL.createMultiRegionTable(tableName, Bytes.toBytes("f"), 6); ArrayList clusterRegionServers = new ArrayList<>(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)) .getLiveServerMetrics().keySet()); - assertEquals(clusterRegionServers.size(), 3); + assertEquals(3, clusterRegionServers.size()); HashMap> serversToDecommssion = new HashMap<>(); // Get a server that has regions. 
We will decommission two of the servers, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java index ab8ebb5e8f..3344c4b470 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi2.java @@ -56,6 +56,7 @@ public class TestAsyncClusterAdminApi2 extends TestAsyncAdminBase { } @Before + @Override public void setUp() throws Exception { TEST_UTIL.startMiniCluster(1, 3); ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); @@ -63,6 +64,7 @@ public class TestAsyncClusterAdminApi2 extends TestAsyncAdminBase { } @After + @Override public void tearDown() throws Exception { IOUtils.closeQuietly(ASYNC_CONN); TEST_UTIL.shutdownMiniCluster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java index 8c2b060129..8968b394c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncDecommissionAdminApi.java @@ -49,7 +49,7 @@ public class TestAsyncDecommissionAdminApi extends TestAsyncAdminBase { new ArrayList<>(admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).get() .getLiveServerMetrics().keySet()); - assertEquals(clusterRegionServers.size(), 2); + assertEquals(2, clusterRegionServers.size()); HashMap> serversToDecommssion = new HashMap<>(); // Get a server that has regions. We will decommission one of the servers, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java index 7a2c00f733..d50e0391bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcedureAdminApi.java @@ -90,7 +90,7 @@ public class TestAsyncProcedureAdminApi extends TestAsyncAdminBase { byte[] result = admin.execProcedureWithReturn(SimpleMasterProcedureManager.SIMPLE_SIGNATURE, "myTest2", new HashMap<>()).get(); assertArrayEquals("Incorrect return data from execProcedure", - SimpleMasterProcedureManager.SIMPLE_DATA.getBytes(), result); + Bytes.toBytes(SimpleMasterProcedureManager.SIMPLE_DATA), result); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index dcccfd168b..91f797145a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; -import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -204,14 +203,14 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { rs -> { ServerName serverName = rs.getServerName(); try { - 
Assert.assertEquals(admin.getRegions(serverName).get().size(), rs + assertEquals(admin.getRegions(serverName).get().size(), rs .getRegions().size()); } catch (Exception e) { fail("admin.getOnlineRegions() method throws a exception: " + e.getMessage()); } regionServerCount.incrementAndGet(); }); - Assert.assertEquals(regionServerCount.get(), 2); + assertEquals(2, regionServerCount.get()); } @Test @@ -229,7 +228,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); // flush region and wait flush operation finished. LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName())); admin.flushRegion(hri.getRegionName()).get(); @@ -239,20 +238,20 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { Threads.sleep(50); } // check the memstore. - Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); + assertEquals(0, regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize()); // write another put into the specific region ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-2"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); admin.flush(tableName).get(); Threads.sleepWithoutInterrupt(500); while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { Threads.sleep(50); } // check the memstore. - Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); + assertEquals(0, regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize()); } @Test @@ -421,7 +420,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { LOG.error(e.toString(), e); } } - assertEquals(count, 2); + assertEquals(2, count); } private void waitUntilMobCompactionFinished(TableName tableName) @@ -471,23 +470,23 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { .map(rsThread -> rsThread.getRegionServer()).collect(Collectors.toList()); List regions = new ArrayList<>(); rsList.forEach(rs -> regions.addAll(rs.getRegions(tableName))); - Assert.assertEquals(regions.size(), 1); + assertEquals(1, regions.size()); int countBefore = countStoreFilesInFamilies(regions, families); - Assert.assertTrue(countBefore > 0); + assertTrue(countBefore > 0); // Minor compaction for all region servers. for (HRegionServer rs : rsList) admin.compactRegionServer(rs.getServerName()).get(); Thread.sleep(5000); int countAfterMinorCompaction = countStoreFilesInFamilies(regions, families); - Assert.assertTrue(countAfterMinorCompaction < countBefore); + assertTrue(countAfterMinorCompaction < countBefore); // Major compaction for all region servers. 
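
Stepping back to the reflective-instantiation hunks earlier in this patch (RegionSplitter, FSHLogProvider, RegionGroupingProvider, WALFactory): Class.newInstance() is swapped out because it is deprecated as of Java 9 and, worse, it rethrows any checked exception from the constructor without the compiler ever seeing it. The replacement surfaces constructor failures as InvocationTargetException. A generic helper sketch, not an HBase API:

  static <T> T instantiate(Class<T> clazz) throws ReflectiveOperationException {
    // clazz.newInstance() would let a checked constructor exception escape
    // undeclared; this form wraps it in InvocationTargetException instead.
    return clazz.getDeclaredConstructor().newInstance();
  }

This is also why the surrounding catch blocks in those hunks collapse to a single catch (Exception e): the new call declares a broader, honest set of reflective exceptions.
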
for (HRegionServer rs : rsList) admin.majorCompactRegionServer(rs.getServerName()).get(); Thread.sleep(5000); int countAfterMajorCompaction = countStoreFilesInFamilies(regions, families); - Assert.assertEquals(countAfterMajorCompaction, 3); + assertEquals(3, countAfterMajorCompaction); } @Test @@ -512,7 +511,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { .getHBaseCluster() .getLiveRegionServerThreads() .forEach(rsThread -> regions.addAll(rsThread.getRegionServer().getRegions(tableName))); - Assert.assertEquals(regions.size(), 1); + assertEquals(1, regions.size()); int countBefore = countStoreFilesInFamilies(regions, families); int countBeforeSingleFamily = countStoreFilesInFamily(regions, family); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java index 9ceb172546..efea20e771 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApiWithClusters.java @@ -81,6 +81,7 @@ public class TestAsyncReplicationAdminApiWithClusters extends TestAsyncAdminBase ASYNC_CONN.getAdmin().addReplicationPeer(ID_SECOND, rpc).join(); } + @Override @After public void tearDown() throws Exception { Pattern pattern = Pattern.compile(tableName.getNameAsString() + ".*"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java index 5014e9655d..7501192d8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSnapshotAdminApi.java @@ -18,22 +18,25 @@ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; -import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.regex.Pattern; - @RunWith(Parameterized.class) @Category({ LargeTests.class, ClientTests.class }) public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { @@ -65,19 +68,19 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { admin.snapshot(snapshotName2, tableName).get(); List snapshots = syncAdmin.listSnapshots(); Collections.sort(snapshots, (snap1, snap2) -> { - Assert.assertNotNull(snap1); - Assert.assertNotNull(snap1.getName()); - Assert.assertNotNull(snap2); - Assert.assertNotNull(snap2.getName()); + assertNotNull(snap1); + assertNotNull(snap1.getName()); + assertNotNull(snap2); + assertNotNull(snap2.getName()); return snap1.getName().compareTo(snap2.getName()); }); - Assert.assertEquals(snapshotName1, 
snapshots.get(0).getName()); - Assert.assertEquals(tableName, snapshots.get(0).getTableName()); - Assert.assertEquals(SnapshotType.FLUSH, snapshots.get(0).getType()); - Assert.assertEquals(snapshotName2, snapshots.get(1).getName()); - Assert.assertEquals(tableName, snapshots.get(1).getTableName()); - Assert.assertEquals(SnapshotType.FLUSH, snapshots.get(1).getType()); + assertEquals(snapshotName1, snapshots.get(0).getName()); + assertEquals(tableName, snapshots.get(0).getTableName()); + assertEquals(SnapshotType.FLUSH, snapshots.get(0).getType()); + assertEquals(snapshotName2, snapshots.get(1).getName()); + assertEquals(tableName, snapshots.get(1).getTableName()); + assertEquals(SnapshotType.FLUSH, snapshots.get(1).getType()); } @Test @@ -93,10 +96,10 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { admin.snapshot(snapshotName1, tableName).get(); List snapshots = syncAdmin.listSnapshots(); - Assert.assertEquals(snapshots.size(), 1); - Assert.assertEquals(snapshotName1, snapshots.get(0).getName()); - Assert.assertEquals(tableName, snapshots.get(0).getTableName()); - Assert.assertEquals(SnapshotType.FLUSH, snapshots.get(0).getType()); + assertEquals(1, snapshots.size()); + assertEquals(snapshotName1, snapshots.get(0).getName()); + assertEquals(tableName, snapshots.get(0).getTableName()); + assertEquals(SnapshotType.FLUSH, snapshots.get(0).getType()); // cloneSnapshot into a existed table. boolean failed = false; @@ -105,10 +108,10 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { } catch (Exception e) { failed = true; } - Assert.assertTrue(failed); + assertTrue(failed); // cloneSnapshot into a new table. - Assert.assertTrue(!syncAdmin.tableExists(tableName2)); + assertTrue(!syncAdmin.tableExists(tableName2)); admin.cloneSnapshot(snapshotName1, tableName2).get(); syncAdmin.tableExists(tableName2); } @@ -120,12 +123,12 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { Result result; int rowCount = 0; while ((result = scanner.next()) != null) { - Assert.assertArrayEquals(result.getRow(), Bytes.toBytes(rowCount)); - Assert.assertArrayEquals(result.getValue(Bytes.toBytes("f1"), Bytes.toBytes("cq")), + assertArrayEquals(result.getRow(), Bytes.toBytes(rowCount)); + assertArrayEquals(result.getValue(Bytes.toBytes("f1"), Bytes.toBytes("cq")), Bytes.toBytes(rowCount)); rowCount += 1; } - Assert.assertEquals(rowCount, expectedRowCount); + assertEquals(rowCount, expectedRowCount); } } } @@ -137,11 +140,11 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { table.put(new Put(Bytes.toBytes(i)).addColumn(Bytes.toBytes("f1"), Bytes.toBytes("cq"), Bytes.toBytes(i))); } - Assert.assertEquals(admin.listSnapshots().get().size(), 0); + assertEquals(0, admin.listSnapshots().get().size()); admin.snapshot(snapshotName1, tableName).get(); admin.snapshot(snapshotName2, tableName).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 2); + assertEquals(2, admin.listSnapshots().get().size()); admin.disableTable(tableName).get(); admin.restoreSnapshot(snapshotName1, true).get(); @@ -161,29 +164,23 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { table.put(new Put(Bytes.toBytes(i)).addColumn(Bytes.toBytes("f1"), Bytes.toBytes("cq"), Bytes.toBytes(i))); } - Assert.assertEquals(admin.listSnapshots().get().size(), 0); + assertEquals(0, admin.listSnapshots().get().size()); admin.snapshot(snapshotName1, tableName).get(); admin.snapshot(snapshotName2, tableName).get(); admin.snapshot(snapshotName3, 
tableName).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 3); - - Assert.assertEquals(admin.listSnapshots(Pattern.compile("(.*)")).get().size(), 3); - Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshotName(\\d+)")).get().size(), 3); - Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshotName[1|3]")).get().size(), 2); - Assert.assertEquals(admin.listSnapshots(Pattern.compile("snapshot(.*)")).get().size(), 3); - Assert.assertEquals( - admin.listTableSnapshots(Pattern.compile("testListSnapshots"), Pattern.compile("s(.*)")).get() - .size(), - 3); - Assert.assertEquals( - admin.listTableSnapshots(Pattern.compile("fakeTableName"), Pattern.compile("snap(.*)")).get() - .size(), - 0); - Assert.assertEquals( - admin.listTableSnapshots(Pattern.compile("test(.*)"), Pattern.compile("snap(.*)[1|3]")).get() - .size(), - 2); + assertEquals(3, admin.listSnapshots().get().size()); + + assertEquals(3, admin.listSnapshots(Pattern.compile("(.*)")).get().size()); + assertEquals(3, admin.listSnapshots(Pattern.compile("snapshotName(\\d+)")).get().size()); + assertEquals(2, admin.listSnapshots(Pattern.compile("snapshotName[1|3]")).get().size()); + assertEquals(3, admin.listSnapshots(Pattern.compile("snapshot(.*)")).get().size()); + assertEquals(3, admin.listTableSnapshots(Pattern.compile("testListSnapshots"), + Pattern.compile("s(.*)")).get().size()); + assertEquals(0, admin.listTableSnapshots(Pattern.compile("fakeTableName"), + Pattern.compile("snap(.*)")).get().size()); + assertEquals(2, admin.listTableSnapshots(Pattern.compile("test(.*)"), + Pattern.compile("snap(.*)[1|3]")).get().size()); } @Test @@ -193,29 +190,29 @@ public class TestAsyncSnapshotAdminApi extends TestAsyncAdminBase { table.put(new Put(Bytes.toBytes(i)).addColumn(Bytes.toBytes("f1"), Bytes.toBytes("cq"), Bytes.toBytes(i))); } - Assert.assertEquals(admin.listSnapshots().get().size(), 0); + assertEquals(0, admin.listSnapshots().get().size()); admin.snapshot(snapshotName1, tableName).get(); admin.snapshot(snapshotName2, tableName).get(); admin.snapshot(snapshotName3, tableName).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 3); + assertEquals(3, admin.listSnapshots().get().size()); admin.deleteSnapshot(snapshotName1).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 2); + assertEquals(2, admin.listSnapshots().get().size()); admin.deleteSnapshots(Pattern.compile("(.*)abc")).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 2); + assertEquals(2, admin.listSnapshots().get().size()); admin.deleteSnapshots(Pattern.compile("(.*)1")).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 2); + assertEquals(2, admin.listSnapshots().get().size()); admin.deleteTableSnapshots(Pattern.compile("(.*)"), Pattern.compile("(.*)1")).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 2); + assertEquals(2, admin.listSnapshots().get().size()); admin.deleteTableSnapshots(Pattern.compile("(.*)"), Pattern.compile("(.*)2")).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 1); + assertEquals(1, admin.listSnapshots().get().size()); admin.deleteTableSnapshots(Pattern.compile("(.*)"), Pattern.compile("(.*)3")).get(); - Assert.assertEquals(admin.listSnapshots().get().size(), 0); + assertEquals(0, admin.listSnapshots().get().size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 
529346fe9f..8b2dce3904 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -135,7 +135,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase { admin.createTable(desc).join(); ModifyableTableDescriptor modifyableDesc = ((ModifyableTableDescriptor) desc); TableDescriptor confirmedHtd = admin.getDescriptor(tableName).get(); - assertEquals(modifyableDesc.compareTo((ModifyableTableDescriptor) confirmedHtd), 0); + assertEquals(0, modifyableDesc.compareTo((ModifyableTableDescriptor) confirmedHtd)); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java index f47e6e96cd..7848251cf2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java @@ -212,13 +212,13 @@ public class TestAsyncTableBatch { .collect(Collectors.toList())).get(); List actions = new ArrayList<>(); actions.add(new Get(Bytes.toBytes(0))); - actions.add(new Put(Bytes.toBytes(1)).addColumn(FAMILY, CQ, Bytes.toBytes((long) 2))); + actions.add(new Put(Bytes.toBytes(1)).addColumn(FAMILY, CQ, Bytes.toBytes(2L))); actions.add(new Delete(Bytes.toBytes(2))); actions.add(new Increment(Bytes.toBytes(3)).addColumn(FAMILY, CQ, 1)); actions.add(new Append(Bytes.toBytes(4)).addColumn(FAMILY, CQ, Bytes.toBytes(4))); RowMutations rm = new RowMutations(Bytes.toBytes(5)); - rm.add(new Put(Bytes.toBytes(5)).addColumn(FAMILY, CQ, Bytes.toBytes((long) 100))); - rm.add(new Put(Bytes.toBytes(5)).addColumn(FAMILY, CQ1, Bytes.toBytes((long) 200))); + rm.add(new Put(Bytes.toBytes(5)).addColumn(FAMILY, CQ, Bytes.toBytes(100L))); + rm.add(new Put(Bytes.toBytes(5)).addColumn(FAMILY, CQ1, Bytes.toBytes(200L))); actions.add(rm); actions.add(new Get(Bytes.toBytes(6))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index 521d2f54d1..ba61ab4688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -189,7 +189,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { try (ResultScanner scanner = table.getScanner(s)) { count = Iterables.size(scanner); } - assertEquals("Count all the rows ", count, 6); + assertEquals("Count all the rows ", 6, count); // all the cache is loaded // trigger a major compaction ScannerThread scannerThread = new ScannerThread(table, cache); @@ -200,7 +200,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { try (ResultScanner scanner = table.getScanner(s)) { count = Iterables.size(scanner); } - assertEquals("Count all the rows ", count, 6); + assertEquals("Count all the rows ", 6, count); } finally { table.close(); } @@ -215,6 +215,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { this.cache = cache; } + @Override public void run() { Scan s = new Scan().withStartRow(ROW4).withStopRow(ROW5).setCaching(1); try { @@ -346,7 +347,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { try (ResultScanner scanner = table.getScanner(s)) { count = 
Iterables.size(scanner); } - assertEquals("Count all the rows ", count, 6); + assertEquals("Count all the rows ", 6, count); // Scan from cache s = new Scan(); @@ -379,7 +380,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { iterator.next(); refBlockCount++; } - assertEquals("One block should be there ", refBlockCount, 1); + assertEquals("One block should be there ", 1, refBlockCount); // Rescan to prepopulate the data // cache this row. Scan s1 = new Scan(); @@ -392,7 +393,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { try { scanner = table.getScanner(s1); int count = Iterables.size(scanner); - assertEquals("Count the rows", count, 2); + assertEquals("Count the rows", 2, count); iterator = cache.iterator(); List newCacheList = new ArrayList<>(); while (iterator.hasNext()) { @@ -407,7 +408,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { } } - assertEquals("old blocks should still be found ", newBlockRefCount, 6); + assertEquals("old blocks should still be found ", 6, newBlockRefCount); latch.countDown(); } catch (IOException e) { @@ -423,7 +424,7 @@ public class TestAvoidCellReferencesIntoShippedBlocks { } } } - assertEquals("Count should give all rows ", count, 10); + assertEquals("Count should give all rows ", 10, count); } finally { table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index 62eb31674f..9103c515cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -127,7 +127,7 @@ public class TestClientPushback { regionStats.getMemStoreLoadPercent()); // check that the load reported produces a nonzero delay long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats); - assertNotEquals("Reported load does not produce a backoff", backoffTime, 0); + assertNotEquals("Reported load does not produce a backoff", 0, backoffTime); LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + server + " is " + backoffTime); @@ -166,13 +166,13 @@ public class TestClientPushback { MetricsConnection.RunnerStats runnerStats = conn.getConnectionMetrics().runnerStats; - assertEquals(runnerStats.delayRunners.getCount(), 1); - assertEquals(runnerStats.normalRunners.getCount(), 1); + assertEquals(1, runnerStats.delayRunners.getCount()); + assertEquals(1, runnerStats.normalRunners.getCount()); assertEquals("", runnerStats.delayIntevalHist.getSnapshot().getMean(), (double)backoffTime, 0.1); latch.await(backoffTime * 2, TimeUnit.MILLISECONDS); - assertNotEquals("AsyncProcess did not submit the work time", endTime.get(), 0); + assertNotEquals("AsyncProcess did not submit the work time", 0, endTime.get()); assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java index 2ad49f8def..f061fc98cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java @@ -120,6 +120,7 @@ public class TestConnectionImplementation { TEST_UTIL.shutdownMiniCluster(); } + @Test public void 
testClusterConnection() throws IOException { ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS, @@ -636,7 +637,7 @@ public class TestConnectionImplementation { LOG.info("Put done, exception caught: " + e.getClass()); Assert.assertEquals(1, e.getNumExceptions()); Assert.assertEquals(1, e.getCauses().size()); - Assert.assertArrayEquals(e.getRow(0).getRow(), ROW); + Assert.assertArrayEquals(ROW, e.getRow(0).getRow()); // Check that we unserialized the exception as expected Throwable cause = ClientExceptionsUtil.findException(e.getCause(0)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java index 47516ec758..b2eae851e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java @@ -171,6 +171,7 @@ public class TestFastFail { * will follow the killing of a regionserver so that we make sure that * some of threads go into PreemptiveFastFailExcception */ + @Override public Boolean call() throws Exception { try (Table table = connection.getTable(TableName.valueOf(tableName))) { Thread.sleep(Math.abs(random.nextInt()) % 250); // Add some jitter here @@ -275,7 +276,7 @@ public class TestFastFail { "All the failures should be coming from the secondput failure", numFailedThreads.get(), numThreadsReturnedFalse); assertEquals("Number of threads that threw execution exceptions " - + "otherwise should be 0", numThreadsThrewExceptions, 0); + + "otherwise should be 0", 0, numThreadsThrewExceptions); assertEquals("The regionservers that returned true should equal to the" + " number of successful threads", numThreadsReturnedTrue, numSuccessfullThreads.get()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 3af245fc96..952905ac61 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -203,125 +203,123 @@ public class TestFromClientSide { /** * Basic client side validation of HBASE-4536 */ - @Test - public void testKeepDeletedCells() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - final byte[] FAMILY = Bytes.toBytes("family"); - final byte[] C0 = Bytes.toBytes("c0"); - - final byte[] T1 = Bytes.toBytes("T1"); - final byte[] T2 = Bytes.toBytes("T2"); - final byte[] T3 = Bytes.toBytes("T3"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .setMaxVersions(3); - - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(hcd); - TEST_UTIL.getAdmin().createTable(desc); - Table h = TEST_UTIL.getConnection().getTable(tableName); - - long ts = System.currentTimeMillis(); - Put p = new Put(T1, ts); - p.addColumn(FAMILY, C0, T1); - h.put(p); - p = new Put(T1, ts+2); - p.addColumn(FAMILY, C0, T2); - h.put(p); - p = new Put(T1, ts+4); - p.addColumn(FAMILY, C0, T3); - h.put(p); - - Delete d = new Delete(T1, ts+3); - h.delete(d); - - d = new Delete(T1, ts+3); - d.addColumns(FAMILY, C0, ts+3); - h.delete(d); - - Get g = new Get(T1); - // does *not* include the delete - g.setTimeRange(0, ts+3); - Result r = h.get(g); - assertArrayEquals(T2, r.getValue(FAMILY, C0)); - - Scan s = new Scan(T1); - 
s.setTimeRange(0, ts+3); - s.setMaxVersions(); - ResultScanner scanner = h.getScanner(s); - Cell[] kvs = scanner.next().rawCells(); - assertArrayEquals(T2, CellUtil.cloneValue(kvs[0])); - assertArrayEquals(T1, CellUtil.cloneValue(kvs[1])); - scanner.close(); - - s = new Scan(T1); - s.setRaw(true); - s.setMaxVersions(); - scanner = h.getScanner(s); - kvs = scanner.next().rawCells(); - assertTrue(PrivateCellUtil.isDeleteFamily(kvs[0])); - assertArrayEquals(T3, CellUtil.cloneValue(kvs[1])); - assertTrue(CellUtil.isDelete(kvs[2])); - assertArrayEquals(T2, CellUtil.cloneValue(kvs[3])); - assertArrayEquals(T1, CellUtil.cloneValue(kvs[4])); - scanner.close(); - h.close(); - } - - /** - * Basic client side validation of HBASE-10118 - */ - @Test - public void testPurgeFutureDeletes() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - final byte[] ROW = Bytes.toBytes("row"); - final byte[] FAMILY = Bytes.toBytes("family"); - final byte[] COLUMN = Bytes.toBytes("column"); - final byte[] VALUE = Bytes.toBytes("value"); - - Table table = TEST_UTIL.createTable(tableName, FAMILY); - - // future timestamp - long ts = System.currentTimeMillis() * 2; - Put put = new Put(ROW, ts); - put.addColumn(FAMILY, COLUMN, VALUE); - table.put(put); - - Get get = new Get(ROW); - Result result = table.get(get); - assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN)); - - Delete del = new Delete(ROW); - del.addColumn(FAMILY, COLUMN, ts); - table.delete(del); - - get = new Get(ROW); - result = table.get(get); - assertNull(result.getValue(FAMILY, COLUMN)); - - // major compaction, purged future deletes - TEST_UTIL.getAdmin().flush(tableName); - TEST_UTIL.getAdmin().majorCompact(tableName); - - // waiting for the major compaction to complete - TEST_UTIL.waitFor(6000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws IOException { - return TEST_UTIL.getAdmin().getCompactionState(tableName) == - CompactionState.NONE; - } - }); - - put = new Put(ROW, ts); - put.addColumn(FAMILY, COLUMN, VALUE); - table.put(put); - - get = new Get(ROW); - result = table.get(get); - assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN)); - - table.close(); - } + @Test + public void testKeepDeletedCells() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + final byte[] FAMILY = Bytes.toBytes("family"); + final byte[] C0 = Bytes.toBytes("c0"); + + final byte[] T1 = Bytes.toBytes("T1"); + final byte[] T2 = Bytes.toBytes("T2"); + final byte[] T3 = Bytes.toBytes("T3"); + HColumnDescriptor hcd = + new HColumnDescriptor(FAMILY).setKeepDeletedCells(KeepDeletedCells.TRUE).setMaxVersions(3); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(hcd); + TEST_UTIL.getAdmin().createTable(desc); + Table h = TEST_UTIL.getConnection().getTable(tableName); + + long ts = System.currentTimeMillis(); + Put p = new Put(T1, ts); + p.addColumn(FAMILY, C0, T1); + h.put(p); + p = new Put(T1, ts + 2); + p.addColumn(FAMILY, C0, T2); + h.put(p); + p = new Put(T1, ts + 4); + p.addColumn(FAMILY, C0, T3); + h.put(p); + + Delete d = new Delete(T1, ts + 3); + h.delete(d); + + d = new Delete(T1, ts + 3); + d.addColumns(FAMILY, C0, ts + 3); + h.delete(d); + + Get g = new Get(T1); + // does *not* include the delete + g.setTimeRange(0, ts + 3); + Result r = h.get(g); + assertArrayEquals(T2, r.getValue(FAMILY, C0)); + + Scan s = new Scan(T1); + s.setTimeRange(0, ts + 3); + s.setMaxVersions(); + ResultScanner scanner = h.getScanner(s); + Cell[] kvs = 
scanner.next().rawCells(); + assertArrayEquals(T2, CellUtil.cloneValue(kvs[0])); + assertArrayEquals(T1, CellUtil.cloneValue(kvs[1])); + scanner.close(); + + s = new Scan(T1); + s.setRaw(true); + s.setMaxVersions(); + scanner = h.getScanner(s); + kvs = scanner.next().rawCells(); + assertTrue(PrivateCellUtil.isDeleteFamily(kvs[0])); + assertArrayEquals(T3, CellUtil.cloneValue(kvs[1])); + assertTrue(CellUtil.isDelete(kvs[2])); + assertArrayEquals(T2, CellUtil.cloneValue(kvs[3])); + assertArrayEquals(T1, CellUtil.cloneValue(kvs[4])); + scanner.close(); + h.close(); + } + + /** + * Basic client side validation of HBASE-10118 + */ + @Test + public void testPurgeFutureDeletes() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + final byte[] ROW = Bytes.toBytes("row"); + final byte[] FAMILY = Bytes.toBytes("family"); + final byte[] COLUMN = Bytes.toBytes("column"); + final byte[] VALUE = Bytes.toBytes("value"); + + Table table = TEST_UTIL.createTable(tableName, FAMILY); + + // future timestamp + long ts = System.currentTimeMillis() * 2; + Put put = new Put(ROW, ts); + put.addColumn(FAMILY, COLUMN, VALUE); + table.put(put); + + Get get = new Get(ROW); + Result result = table.get(get); + assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN)); + + Delete del = new Delete(ROW); + del.addColumn(FAMILY, COLUMN, ts); + table.delete(del); + + get = new Get(ROW); + result = table.get(get); + assertNull(result.getValue(FAMILY, COLUMN)); + + // major compaction, purged future deletes + TEST_UTIL.getAdmin().flush(tableName); + TEST_UTIL.getAdmin().majorCompact(tableName); + + // waiting for the major compaction to complete + TEST_UTIL.waitFor(6000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return TEST_UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE; + } + }); + + put = new Put(ROW, ts); + put.addColumn(FAMILY, COLUMN, VALUE); + table.put(put); + + get = new Get(ROW); + result = table.get(get); + assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN)); + + table.close(); + } /** * Verifies that getConfiguration returns the same Configuration object used @@ -660,13 +658,13 @@ public class TestFromClientSide { ResultScanner scanner = ht.getScanner(scan); int expectedIndex = 1; for(Result result : ht.getScanner(scan)) { - assertEquals(result.size(), 1); + assertEquals(1, result.size()); assertTrue(Bytes.equals(CellUtil.cloneRow(result.rawCells()[0]), ROWS[expectedIndex])); assertTrue(Bytes.equals(CellUtil.cloneQualifier(result.rawCells()[0]), QUALIFIERS[expectedIndex])); expectedIndex++; } - assertEquals(expectedIndex, 6); + assertEquals(6, expectedIndex); scanner.close(); } @@ -693,11 +691,11 @@ public class TestFromClientSide { ResultScanner scanner = ht.getScanner(scan); int expectedIndex = 0; for(Result result : ht.getScanner(scan)) { - assertEquals(result.size(), 1); + assertEquals(1, result.size()); assertTrue(Bytes.toLong(result.getValue(FAMILY, QUALIFIER)) > 500); expectedIndex++; } - assertEquals(expectedIndex, 4); + assertEquals(4, expectedIndex); scanner.close(); } @@ -726,12 +724,12 @@ public class TestFromClientSide { ResultScanner scanner = ht.getScanner(scan); int count = 0; for(Result result : ht.getScanner(scan)) { - assertEquals(result.size(), 1); - assertEquals(result.rawCells()[0].getValueLength(), Bytes.SIZEOF_INT); - assertEquals(Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0])), VALUE.length); + assertEquals(1, result.size()); + assertEquals(Bytes.SIZEOF_INT, 
result.rawCells()[0].getValueLength()); + assertEquals(VALUE.length, Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0]))); count++; } - assertEquals(count, 10); + assertEquals(10, count); scanner.close(); } @@ -3499,7 +3497,9 @@ public class TestFromClientSide { private long [] makeStamps(int n) { long [] stamps = new long[n]; - for(int i=0;i metaCachePreservingExceptions = metaCachePreservingExceptions(); + @Override public void throwOnGet(FakeRSRpcServices rpcServices, ClientProtos.GetRequest request) throws ServiceException { throwSomeExceptions(rpcServices, request.getRegion()); } + @Override public void throwOnMutate(FakeRSRpcServices rpcServices, ClientProtos.MutateRequest request) throws ServiceException { throwSomeExceptions(rpcServices, request.getRegion()); } + @Override public void throwOnScan(FakeRSRpcServices rpcServices, ClientProtos.ScanRequest request) throws ServiceException { if (!request.hasScannerId()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index ee39a83c37..d73ebc0184 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -180,7 +180,7 @@ public class TestMultipleTimestamps { Integer[] scanRows = new Integer[] {5, 7}; Integer[] scanColumns = new Integer[] {3, 4, 5}; - Long[] scanTimestamps = new Long[] {2l, 3L}; + Long[] scanTimestamps = new Long[] { 2L, 3L}; int scanMaxVersions = 2; put(ht, FAMILY, putRows, putColumns, putTimestamps); @@ -241,7 +241,7 @@ public class TestMultipleTimestamps { Integer[] scanRows = new Integer[] {3, 5, 7}; Integer[] scanColumns = new Integer[] {3, 4, 5}; - Long[] scanTimestamps = new Long[] {2l, 4L}; + Long[] scanTimestamps = new Long[] { 2L, 4L}; int scanMaxVersions = 5; put(ht, FAMILY, putRows1, putColumns1, putTimestamps1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index a06055ded2..f5ea10e28b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -261,9 +261,9 @@ public class TestReplicasClient { AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest( getRS().getServerName(), hri, null); AdminProtos.OpenRegionResponse responseOpen = getRS().getRSRpcServices().openRegion(null, orr); - Assert.assertEquals(responseOpen.getOpeningStateCount(), 1); - Assert.assertEquals(responseOpen.getOpeningState(0), - AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED); + Assert.assertEquals(1, responseOpen.getOpeningStateCount()); + Assert.assertEquals(AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED, + responseOpen.getOpeningState(0)); checkRegionIsOpened(hri); } @@ -583,8 +583,8 @@ public class TestReplicasClient { r = table.get(g); Assert.assertFalse(r.isStale()); Assert.assertFalse(r.getColumnCells(f, b1).isEmpty()); - Assert.assertEquals(hedgedReadOps.getCount(), 1); - Assert.assertEquals(hedgedReadWin.getCount(), 0); + Assert.assertEquals(1, hedgedReadOps.getCount()); + Assert.assertEquals(0, hedgedReadWin.getCount()); SlowMeCopro.sleepTime.set(0); SlowMeCopro.getSecondaryCdl().get().countDown(); LOG.info("hedged read occurred but not faster"); @@ -597,8 +597,8 
@@ public class TestReplicasClient { r = table.get(g); Assert.assertTrue(r.isStale()); Assert.assertTrue(r.getColumnCells(f, b1).isEmpty()); - Assert.assertEquals(hedgedReadOps.getCount(), 2); - Assert.assertEquals(hedgedReadWin.getCount(), 1); + Assert.assertEquals(2, hedgedReadOps.getCount()); + Assert.assertEquals(1, hedgedReadWin.getCount()); SlowMeCopro.getPrimaryCdl().get().countDown(); LOG.info("hedged read occurred and faster"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java index 3190fb9c9a..fe1cc9c225 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java @@ -205,7 +205,7 @@ public class TestRestoreSnapshotFromClient { HTableDescriptor htd = admin.getTableDescriptor(tableName); assertEquals(2, htd.getFamilies().size()); SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, TEST_FAMILY2); - long snapshot2Rows = snapshot1Rows + 500; + long snapshot2Rows = snapshot1Rows + 500L; assertEquals(snapshot2Rows, countRows(table)); assertEquals(500, countRows(table, TEST_FAMILY2)); Set fsFamilies = getFamiliesFromFS(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java index 33352985a6..b494895f04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java @@ -194,7 +194,7 @@ public class TestResult extends TestCase { loadValueBuffer.clear(); r.loadValue(family, qf, loadValueBuffer); loadValueBuffer.flip(); - assertEquals(ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i))), loadValueBuffer); + assertEquals(loadValueBuffer, ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i)))); assertEquals(ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i))), r.getValueAsByteBuffer(family, qf)); } @@ -219,7 +219,7 @@ public class TestResult extends TestCase { loadValueBuffer.clear(); r.loadValue(family, qf, loadValueBuffer); loadValueBuffer.flip(); - assertEquals(ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i))), loadValueBuffer); + assertEquals(loadValueBuffer, ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i)))); assertEquals(ByteBuffer.wrap(Bytes.add(value, Bytes.toBytes(i))), r.getValueAsByteBuffer(family, qf)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java index b1126e59f5..387253c0da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java @@ -142,7 +142,7 @@ public class TestServerBusyException { TEST_UTIL.shutdownMiniCluster(); } - private class TestPutThread extends Thread { + private static class TestPutThread extends Thread { Table table; int getServerBusyException = 0; @@ -163,7 +163,7 @@ public class TestServerBusyException { } } - private class TestGetThread extends Thread { + private static class TestGetThread extends Thread { Table table; int getServerBusyException = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java index 84bfa766be..41e303409f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java @@ -160,8 +160,8 @@ public class TestSizeFailures { * @return An entry where the first item is rows observed and the second is entries observed. */ private Entry sumTable(ResultScanner scanner) { - long rowsObserved = 0l; - long entriesObserved = 0l; + long rowsObserved = 0L; + long entriesObserved = 0L; // Read all the records in the table for (Result result : scanner) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSmallReversedScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSmallReversedScanner.java index b050397f57..4de6a76262 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSmallReversedScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSmallReversedScanner.java @@ -105,7 +105,7 @@ public class TestSmallReversedScanner { Assert.assertArrayEquals(r.getRow(), Bytes.toBytes(inputRowKeys[value])); } - Assert.assertEquals(value, 0); + Assert.assertEquals(0, value); } /** @@ -133,6 +133,6 @@ public class TestSmallReversedScanner { Assert.assertArrayEquals(r.getRow(), new byte[] { (char) 0x00 }); Assert.assertTrue(--count >= 0); } - Assert.assertEquals(count, 0); + Assert.assertEquals(0, count); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 1127a5e831..b077d64164 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -185,7 +185,7 @@ public class TestSnapshotFromClient { admin.deleteSnapshots(Pattern.compile("TableSnapshot.*")); List snapshots = admin.listSnapshots(); assertEquals(1, snapshots.size()); - assertEquals(snapshots.get(0).getName(), snapshot3); + assertEquals(snapshot3, snapshots.get(0).getName()); admin.deleteSnapshot(snapshot3); admin.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java index 49c656067b..fa3715dfc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java @@ -212,8 +212,8 @@ public class TestSnapshotMetadata { cloneHtd.getValues().size()); assertEquals(originalTableDescriptor.getConfiguration().size(), cloneHtd.getConfiguration().size()); - assertEquals(cloneHtd.getValue(TEST_CUSTOM_VALUE), TEST_CUSTOM_VALUE); - assertEquals(cloneHtd.getConfigurationValue(TEST_CONF_CUSTOM_VALUE), TEST_CONF_CUSTOM_VALUE); + assertEquals(TEST_CUSTOM_VALUE, cloneHtd.getValue(TEST_CUSTOM_VALUE)); + assertEquals(TEST_CONF_CUSTOM_VALUE, cloneHtd.getConfigurationValue(TEST_CONF_CUSTOM_VALUE)); assertEquals(originalTableDescriptor.getValues(), cloneHtd.getValues()); assertEquals(originalTableDescriptor.getConfiguration(), cloneHtd.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java index 
df2a6e8a1f..9e543dcf5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithAcl.java @@ -177,7 +177,7 @@ public class TestSnapshotWithAcl extends SecureTestUtil { byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER); Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++)); } - Assert.assertEquals(rowCount, ROW_COUNT); + Assert.assertEquals(ROW_COUNT, rowCount); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java index 678134bb40..d239eb8e3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java @@ -80,13 +80,13 @@ public class TestSplitOrMergeStatus { Admin admin = TEST_UTIL.getAdmin(); initSwitchStatus(admin); boolean[] results = admin.setSplitOrMergeEnabled(false, false, MasterSwitchType.SPLIT); - assertEquals(results.length, 1); + assertEquals(1, results.length); assertTrue(results[0]); admin.split(t.getName()); int count = admin.getTableRegions(tableName).size(); assertTrue(originalCount == count); results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT); - assertEquals(results.length, 1); + assertEquals(1, results.length); assertFalse(results[0]); admin.split(t.getName()); while ((count = admin.getTableRegions(tableName).size()) == originalCount) { @@ -117,7 +117,7 @@ public class TestSplitOrMergeStatus { // Merge switch is off so merge should NOT succeed. boolean[] results = admin.setSplitOrMergeEnabled(false, false, MasterSwitchType.MERGE); - assertEquals(results.length, 1); + assertEquals(1, results.length); assertTrue(results[0]); List regions = admin.getTableRegions(t.getName()); assertTrue(regions.size() > 1); @@ -134,7 +134,7 @@ public class TestSplitOrMergeStatus { results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE); regions = admin.getTableRegions(t.getName()); - assertEquals(results.length, 1); + assertEquals(1, results.length); assertFalse(results[0]); f = admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(), regions.get(1).getEncodedNameAsBytes(), true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java index 89af5de61c..ea60ec2a38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java @@ -181,15 +181,15 @@ public class TestTimestampsFilter { Table ht = TEST_UTIL.createTable(TableName.valueOf(TABLE), FAMILIES, Integer.MAX_VALUE); Put p = new Put(Bytes.toBytes("row")); - p.addColumn(FAMILY, Bytes.toBytes("column0"), (long) 3, Bytes.toBytes("value0-3")); - p.addColumn(FAMILY, Bytes.toBytes("column1"), (long) 3, Bytes.toBytes("value1-3")); - p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 1, Bytes.toBytes("value2-1")); - p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 2, Bytes.toBytes("value2-2")); - p.addColumn(FAMILY, Bytes.toBytes("column2"), (long) 3, Bytes.toBytes("value2-3")); - p.addColumn(FAMILY, Bytes.toBytes("column3"), (long) 2, Bytes.toBytes("value3-2")); - p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 1, Bytes.toBytes("value4-1")); 
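The (long)-cast removals in this TestTimestampsFilter hunk, together with the 2l -> 2L, 0l -> 0L and snapshot1Rows + 500L changes in the files above it, follow one recurring cleanup: a lowercase long suffix is easily misread as the digit 1, and an int expression assigned to a long widens only after the arithmetic has already run (and possibly wrapped) in int. A minimal standalone sketch of both points; the names and values here are illustrative, not taken from the patch:

    public class LongLiteralExample {
      public static void main(String[] args) {
        long a = 2l;  // legal, but the suffix reads like the digit 1
        long b = 2L;  // same value, unambiguous

        // The multiplication runs in int and wraps *before* widening:
        long nanosPerHour   = 1000 * 1000 * 1000 * 3600;   // wrong result
        // One long operand promotes the whole expression to long arithmetic:
        long nanosPerHourOk = 1000L * 1000 * 1000 * 3600;
        System.out.println(nanosPerHour + " vs " + nanosPerHourOk);
      }
    }

The (long) 3 -> 3L rewrites are behaviourally identical either way; the suffix form simply states the intent in the literal itself.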
- p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 2, Bytes.toBytes("value4-2")); - p.addColumn(FAMILY, Bytes.toBytes("column4"), (long) 3, Bytes.toBytes("value4-3")); + p.addColumn(FAMILY, Bytes.toBytes("column0"), 3L, Bytes.toBytes("value0-3")); + p.addColumn(FAMILY, Bytes.toBytes("column1"), 3L, Bytes.toBytes("value1-3")); + p.addColumn(FAMILY, Bytes.toBytes("column2"), 1L, Bytes.toBytes("value2-1")); + p.addColumn(FAMILY, Bytes.toBytes("column2"), 2L, Bytes.toBytes("value2-2")); + p.addColumn(FAMILY, Bytes.toBytes("column2"), 3L, Bytes.toBytes("value2-3")); + p.addColumn(FAMILY, Bytes.toBytes("column3"), 2L, Bytes.toBytes("value3-2")); + p.addColumn(FAMILY, Bytes.toBytes("column4"), 1L, Bytes.toBytes("value4-1")); + p.addColumn(FAMILY, Bytes.toBytes("column4"), 2L, Bytes.toBytes("value4-2")); + p.addColumn(FAMILY, Bytes.toBytes("column4"), 3L, Bytes.toBytes("value4-3")); ht.put(p); ArrayList timestamps = new ArrayList<>(); @@ -209,7 +209,7 @@ public class TestTimestampsFilter { + Bytes.toString(CellUtil.cloneValue(kv))); } - assertEquals(result.listCells().size(), 2); + assertEquals(2, result.listCells().size()); assertTrue(CellUtil.matchingValue(result.listCells().get(0), Bytes.toBytes("value2-3"))); assertTrue(CellUtil.matchingValue(result.listCells().get(1), Bytes.toBytes("value4-3"))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java index 6511a42ce3..3f4029a177 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java @@ -70,7 +70,7 @@ public class TestUpdateConfiguration { admin.updateConfiguration(server); Configuration conf = TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration(); int custom = conf.getInt("hbase.custom.config", 0); - assertEquals(custom, 1000); + assertEquals(1000, custom); // restore hbase-site.xml Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING); } @@ -97,17 +97,20 @@ public class TestUpdateConfiguration { admin.updateConfiguration(); // Check the configuration of the Masters - Configuration masterConfiguration = TEST_UTIL.getMiniHBaseCluster().getMaster(0).getConfiguration(); + Configuration masterConfiguration = + TEST_UTIL.getMiniHBaseCluster().getMaster(0).getConfiguration(); int custom = masterConfiguration.getInt("hbase.custom.config", 0); - assertEquals(custom, 1000); - Configuration backupMasterConfiguration = TEST_UTIL.getMiniHBaseCluster().getMaster(1).getConfiguration(); + assertEquals(1000, custom); + Configuration backupMasterConfiguration = + TEST_UTIL.getMiniHBaseCluster().getMaster(1).getConfiguration(); custom = backupMasterConfiguration.getInt("hbase.custom.config", 0); - assertEquals(custom, 1000); + assertEquals(1000, custom); // Check the configuration of the RegionServer - Configuration regionServerConfiguration = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration(); + Configuration regionServerConfiguration = + TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration(); custom = regionServerConfiguration.getInt("hbase.custom.config", 0); - assertEquals(custom, 1000); + assertEquals(1000, custom); // restore hbase-site.xml Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java index 3b7fd84e3d..82eda2a231 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java @@ -88,7 +88,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin1.disableTableReplication(tableName); table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { - assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_LOCAL); + assertEquals(HConstants.REPLICATION_SCOPE_LOCAL, fam.getScope()); } } @@ -119,7 +119,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin1.enableTableReplication(tableName); table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { - assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL); + assertEquals(HConstants.REPLICATION_SCOPE_GLOBAL, fam.getScope()); } } @@ -144,7 +144,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin1.enableTableReplication(tableName); table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { - assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL); + assertEquals(HConstants.REPLICATION_SCOPE_GLOBAL, fam.getScope()); } } @@ -153,12 +153,12 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { admin1.disableTableReplication(tableName); HTableDescriptor table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { - assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_LOCAL); + assertEquals(HConstants.REPLICATION_SCOPE_LOCAL, fam.getScope()); } admin1.enableTableReplication(tableName); table = admin1.getTableDescriptor(tableName); for (HColumnDescriptor fam : table.getColumnFamilies()) { - assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL); + assertEquals(HConstants.REPLICATION_SCOPE_GLOBAL, fam.getScope()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java index 1f8dbc40fc..06e1a359ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/conf/TestConfigurationManager.java @@ -42,6 +42,7 @@ public class TestConfigurationManager { register(); } + @Override public void onConfigurationChange(Configuration conf) { notifiedOnChange = true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java index 35bcd77b15..d7c70da240 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java @@ -85,7 +85,7 @@ public class TestConstraint { try { // test that we don't fail on a valid put Put put = new Put(row1); - byte[] value = Integer.toString(10).getBytes(); + byte[] value = Bytes.toBytes(Integer.toString(10)); byte[] qualifier = new byte[0]; put.addColumn(dummy, qualifier, value); table.put(put); @@ -119,7 +119,7 @@ public class TestConstraint { // test that we 
do fail on violation Put put = new Put(row1); byte[] qualifier = new byte[0]; - put.addColumn(dummy, qualifier, "fail".getBytes()); + put.addColumn(dummy, qualifier, Bytes.toBytes("fail")); LOG.warn("Doing put in table"); try { table.put(put); @@ -158,7 +158,7 @@ public class TestConstraint { // test that we don't fail because its disabled Put put = new Put(row1); byte[] qualifier = new byte[0]; - put.addColumn(dummy, qualifier, "pass".getBytes()); + put.addColumn(dummy, qualifier, Bytes.toBytes("pass")); table.put(put); } finally { table.close(); @@ -191,7 +191,7 @@ public class TestConstraint { // test that we do fail on violation Put put = new Put(row1); byte[] qualifier = new byte[0]; - put.addColumn(dummy, qualifier, "pass".getBytes()); + put.addColumn(dummy, qualifier, Bytes.toBytes("pass")); LOG.warn("Doing put in table"); table.put(put); } finally { @@ -224,7 +224,7 @@ public class TestConstraint { // test that we do fail on violation Put put = new Put(row1); byte[] qualifier = new byte[0]; - put.addColumn(dummy, qualifier, "pass".getBytes()); + put.addColumn(dummy, qualifier, Bytes.toBytes("pass")); try{ table.put(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java index f3d90f6d61..1da31dadb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java @@ -123,7 +123,8 @@ public class SampleRegionWALCoprocessor implements WALCoprocessor, RegionCoproce if (Arrays.equals(family, changedFamily) && Arrays.equals(qulifier, changedQualifier)) { LOG.debug("Found the KeyValue from WALEdit which should be changed."); - cell.getValueArray()[cell.getValueOffset()] += 1; + cell.getValueArray()[cell.getValueOffset()] = + (byte) (cell.getValueArray()[cell.getValueOffset()] + 1); } } if (null != row) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 1394dbd55d..0d864b628e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -558,7 +558,7 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(familyPaths); assertEquals(1,familyPaths.size()); - assertArrayEquals(familyPaths.get(0).getFirst(), TestRegionObserverInterface.A); + assertArrayEquals(TestRegionObserverInterface.A, familyPaths.get(0).getFirst()); String familyPath = familyPaths.get(0).getSecond(); String familyName = Bytes.toString(TestRegionObserverInterface.A); assertEquals(familyPath.substring(familyPath.length()-familyName.length()-1),"/"+familyName); @@ -577,7 +577,7 @@ public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(familyPaths); assertEquals(1,familyPaths.size()); - assertArrayEquals(familyPaths.get(0).getFirst(), TestRegionObserverInterface.A); + assertArrayEquals(TestRegionObserverInterface.A, familyPaths.get(0).getFirst()); String familyPath = familyPaths.get(0).getSecond(); String familyName = Bytes.toString(TestRegionObserverInterface.A); 
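The SampleRegionWALCoprocessor change a few hunks above is subtler than the surrounding assertion swaps: a compound assignment such as value[i] += 1 on a byte array element compiles, but the addition runs in int and is narrowed back to byte through a cast the compiler inserts silently, which is presumably what error-prone flags here (its NarrowingCompoundAssignment pattern). A standalone sketch of the hidden wrap-around, not taken from the coprocessor code:

    public class NarrowingExample {
      public static void main(String[] args) {
        byte[] value = { 127 };
        // Hides a cast: the sum is computed in int, then truncated,
        // so 127 + 1 silently wraps to -128.
        value[0] += 1;
        // The patch's spelling makes the narrowing visible at the call site:
        value[0] = (byte) (value[0] + 1);
        System.out.println(value[0]);  // prints -127
      }
    }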
assertEquals(familyPath.substring(familyPath.length()-familyName.length()-1),"/"+familyName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java index 27865f7088..6e6c34b383 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java @@ -108,12 +108,10 @@ public class TestCoprocessorConfiguration { tableCoprocessorLoaded.set(false); new RegionCoprocessorHost(region, rsServices, conf); assertEquals("System coprocessors loading default was not honored", - systemCoprocessorLoaded.get(), - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); assertEquals("Table coprocessors loading default was not honored", - tableCoprocessorLoaded.get(), - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED && - CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED && + CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED, tableCoprocessorLoaded.get()); } @Test @@ -123,8 +121,7 @@ public class TestCoprocessorConfiguration { systemCoprocessorLoaded.set(false); new RegionServerCoprocessorHost(rsServices, conf); assertEquals("System coprocessors loading default was not honored", - systemCoprocessorLoaded.get(), - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); } @Test @@ -134,8 +131,7 @@ public class TestCoprocessorConfiguration { systemCoprocessorLoaded.set(false); new MasterCoprocessorHost(masterServices, conf); assertEquals("System coprocessors loading default was not honored", - systemCoprocessorLoaded.get(), - CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java index 33a488ff17..de697f144a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java @@ -34,7 +34,7 @@ public class TestCoprocessorHost { /** * An {@link Abortable} implementation for tests. 
*/ - private class TestAbortable implements Abortable { + private static class TestAbortable implements Abortable { private volatile boolean aborted = false; @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java index 2a353658a2..bd0efd85e3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java @@ -93,7 +93,7 @@ public class TestMasterCoprocessorExceptionWithAbort { fail("BuggyMasterObserver failed to throw an exception."); } catch (IOException e) { assertEquals("HBaseAdmin threw an interrupted IOException as expected.", - e.getClass().getName(), "java.io.InterruptedIOException"); + "java.io.InterruptedIOException", e.getClass().getName()); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 431e73eb7c..ea817ff5e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -932,6 +932,7 @@ public class TestMasterObserver { return preModifyTableActionCalled && !postCompletedModifyTableActionCalled; } + @Override public void preEnableTableAction( final ObserverContext ctx, final TableName tableName) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 166dfdd596..09aa4ff41a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -341,6 +341,7 @@ public class TestWALObserver { User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime"); user.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { Path p = runWALSplit(newConf); LOG.info("WALSplit path == " + p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java index b0b17f9718..a3f2f1cecc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/executor/TestExecutorService.java @@ -145,7 +145,7 @@ public class TestExecutorService { } public static class TestEventHandler extends EventHandler { - private AtomicBoolean lock; + private final AtomicBoolean lock; private AtomicInteger counter; public TestEventHandler(Server server, EventType eventType, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java index c31eebfd98..0e5fdb2420 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestBitComparator.java @@ -103,13 +103,13 @@ public class TestBitComparator { private void testOperation(byte[] data, byte[] 
comparatorBytes, BitComparator.BitwiseOp operator, int expected) { BitComparator comparator = new BitComparator(comparatorBytes, operator); - assertEquals(comparator.compareTo(data), expected); + assertEquals(expected, comparator.compareTo(data)); } private void testOperation(ByteBuffer data, byte[] comparatorBytes, BitComparator.BitwiseOp operator, int expected) { BitComparator comparator = new BitComparator(comparatorBytes, operator); - assertEquals(comparator.compareTo(data, 0, data.capacity()), expected); + assertEquals(expected, comparator.compareTo(data, 0, data.capacity())); } @Test @@ -142,13 +142,13 @@ public class TestBitComparator { private void testOperationWithOffset(byte[] data, byte[] comparatorBytes, BitComparator.BitwiseOp operator, int expected) { BitComparator comparator = new BitComparator(comparatorBytes, operator); - assertEquals(comparator.compareTo(data, 1, comparatorBytes.length), expected); + assertEquals(expected, comparator.compareTo(data, 1, comparatorBytes.length)); } private void testOperationWithOffset(ByteBuffer data, byte[] comparatorBytes, BitComparator.BitwiseOp operator, int expected) { BitComparator comparator = new BitComparator(comparatorBytes, operator); - assertEquals(comparator.compareTo(data, 1, comparatorBytes.length), expected); + assertEquals(expected, comparator.compareTo(data, 1, comparatorBytes.length)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index ca2c88bbde..ec11ce0fd5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -136,8 +136,8 @@ public class TestFilter { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; - private long numRows = ROWS_ONE.length + ROWS_TWO.length; - private long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length; + private long numRows = (long) ROWS_ONE.length + ROWS_TWO.length; + private long colsPerRow = (long) FAMILIES.length * QUALIFIERS_ONE.length; @Before public void setUp() throws Exception { @@ -1756,15 +1756,14 @@ public class TestFilter { assertTrue("Qualifier mismatch", CellUtil.matchingQualifier(kv, kvs[idx])); assertFalse("Should not have returned whole value", CellUtil.matchingValue(kv, kvs[idx])); if (useLen) { - assertEquals("Value in result is not SIZEOF_INT", - kv.getValueLength(), Bytes.SIZEOF_INT); + assertEquals("Value in result is not SIZEOF_INT", Bytes.SIZEOF_INT, kv.getValueLength()); LOG.info("idx = " + idx + ", len=" + kvs[idx].getValueLength() + ", actual=" + Bytes.toInt(CellUtil.cloneValue(kv))); assertEquals("Scan value should be the length of the actual value. 
", kvs[idx].getValueLength(), Bytes.toInt(CellUtil.cloneValue(kv)) ); LOG.info("good"); } else { - assertEquals("Value in result is not empty", kv.getValueLength(), 0); + assertEquals("Value in result is not empty", 0, kv.getValueLength()); } idx++; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java index fdd7e77056..ad5ee997cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java @@ -136,6 +136,7 @@ public class TestFilterFromRegionSide { public static class FirstSeveralCellsFilter extends FilterBase{ private int count = 0; + @Override public void reset() { count = 0; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java index 6574d04de3..2d2a42590b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterList.java @@ -768,7 +768,7 @@ public class TestFilterList { MockFilter filter5 = new MockFilter(ReturnCode.SKIP); MockFilter filter6 = new MockFilter(ReturnCode.SEEK_NEXT_USING_HINT); FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, filter1, filter2); - assertEquals(filterList.filterCell(kv1), ReturnCode.INCLUDE); + assertEquals(ReturnCode.INCLUDE, filterList.filterCell(kv1)); filterList = new FilterList(Operator.MUST_PASS_ONE, filter2, filter3); assertEquals(ReturnCode.INCLUDE_AND_NEXT_COL, filterList.filterCell(kv1)); @@ -936,6 +936,7 @@ public class TestFilterList { private static class MockNextRowFilter extends FilterBase { private int hitCount = 0; + @Override public ReturnCode filterCell(final Cell v) throws IOException { hitCount++; return ReturnCode.NEXT_ROW; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java index 8fa41e32ca..d470face38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java @@ -301,8 +301,8 @@ public class TestFilterSerialization { // Non-empty timestamp list LinkedList list = new LinkedList<>(); - list.add(new Long(System.currentTimeMillis())); - list.add(new Long(System.currentTimeMillis())); + list.add(System.currentTimeMillis()); + list.add(System.currentTimeMillis()); timestampsFilter = new TimestampsFilter(list); assertTrue(timestampsFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java index 5f25b491dd..25ea358715 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java @@ -86,7 +86,7 @@ public class TestFuzzyRowFilterEndToEnd { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); // set no splits - conf.setLong(HConstants.HREGION_MAX_FILESIZE, 
((long) 1024) * 1024 * 1024 * 10); + conf.setLong(HConstants.HREGION_MAX_FILESIZE, (1024L) * 1024 * 1024 * 10); TEST_UTIL.startMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java index ebccc3455d..768ab7a03d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java @@ -162,20 +162,24 @@ public class TestInvocationRecordFilter { private List visitedKeyValues = new ArrayList<>(); + @Override public void reset() { visitedKeyValues.clear(); } + @Override public ReturnCode filterCell(final Cell ignored) { visitedKeyValues.add(ignored); return ReturnCode.INCLUDE; } + @Override public void filterRowCells(List kvs) { kvs.clear(); kvs.addAll(visitedKeyValues); } + @Override public boolean hasFilterRow() { return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java index bcd239d18d..c5200f98aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestParseFilter.java @@ -18,9 +18,12 @@ */ package org.apache.hadoop.hbase.filter; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -89,7 +92,7 @@ public class TestParseFilter { String filterString = " PrefixFilter('row' ) "; PrefixFilter prefixFilter = doTestFilter(filterString, PrefixFilter.class); byte [] prefix = prefixFilter.getPrefix(); - assertEquals(new String(prefix), "row"); + assertEquals("row", new String(prefix, StandardCharsets.UTF_8)); filterString = " PrefixFilter(row)"; @@ -107,7 +110,7 @@ public class TestParseFilter { ColumnPrefixFilter columnPrefixFilter = doTestFilter(filterString, ColumnPrefixFilter.class); byte [] columnPrefix = columnPrefixFilter.getPrefix(); - assertEquals(new String(columnPrefix), "qualifier"); + assertEquals("qualifier", new String(columnPrefix, StandardCharsets.UTF_8)); } @Test @@ -116,8 +119,8 @@ public class TestParseFilter { MultipleColumnPrefixFilter multipleColumnPrefixFilter = doTestFilter(filterString, MultipleColumnPrefixFilter.class); byte [][] prefixes = multipleColumnPrefixFilter.getPrefix(); - assertEquals(new String(prefixes[0]), "qualifier1"); - assertEquals(new String(prefixes[1]), "qualifier2"); + assertEquals("qualifier1", new String(prefixes[0], StandardCharsets.UTF_8)); + assertEquals("qualifier2", new String(prefixes[1], StandardCharsets.UTF_8)); } @Test @@ -126,7 +129,7 @@ public class TestParseFilter { ColumnCountGetFilter columnCountGetFilter = doTestFilter(filterString, ColumnCountGetFilter.class); int limit = columnCountGetFilter.getLimit(); - assertEquals(limit, 4); + assertEquals(4, limit); filterString = " ColumnCountGetFilter('abc')"; try { @@ -151,7 +154,7 @@ public class TestParseFilter { PageFilter pageFilter = doTestFilter(filterString, PageFilter.class); long pageSize = pageFilter.getPageSize(); - assertEquals(pageSize, 4); + assertEquals(4, pageSize); filterString = " PageFilter('123')"; try { @@ -168,9 +171,9 @@ 
public class TestParseFilter { ColumnPaginationFilter columnPaginationFilter = doTestFilter(filterString, ColumnPaginationFilter.class); int limit = columnPaginationFilter.getLimit(); - assertEquals(limit, 4); + assertEquals(4, limit); int offset = columnPaginationFilter.getOffset(); - assertEquals(offset, 6); + assertEquals(6, offset); filterString = " ColumnPaginationFilter('124')"; try { @@ -203,7 +206,7 @@ public class TestParseFilter { InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); - assertEquals(new String(stopRowKey), "row 3"); + assertEquals("row 3", new String(stopRowKey, StandardCharsets.UTF_8)); } @@ -213,13 +216,13 @@ public class TestParseFilter { TimestampsFilter timestampsFilter = doTestFilter(filterString, TimestampsFilter.class); List timestamps = timestampsFilter.getTimestamps(); - assertEquals(timestamps.size(), 2); - assertEquals(timestamps.get(0), new Long(6)); + assertEquals(2, timestamps.size()); + assertEquals(Long.valueOf(6), timestamps.get(0)); filterString = "TimestampsFilter()"; timestampsFilter = doTestFilter(filterString, TimestampsFilter.class); timestamps = timestampsFilter.getTimestamps(); - assertEquals(timestamps.size(), 0); + assertEquals(0, timestamps.size()); filterString = "TimestampsFilter(9223372036854775808, 6)"; try { @@ -246,7 +249,7 @@ public class TestParseFilter { assertEquals(CompareOperator.EQUAL, rowFilter.getCompareOperator()); assertTrue(rowFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) rowFilter.getComparator(); - assertEquals("regionse", new String(binaryComparator.getValue())); + assertEquals("regionse", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -258,7 +261,7 @@ public class TestParseFilter { assertTrue(familyFilter.getComparator() instanceof BinaryPrefixComparator); BinaryPrefixComparator binaryPrefixComparator = (BinaryPrefixComparator) familyFilter.getComparator(); - assertEquals("pre", new String(binaryPrefixComparator.getValue())); + assertEquals("pre", new String(binaryPrefixComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -270,7 +273,7 @@ public class TestParseFilter { assertTrue(qualifierFilter.getComparator() instanceof RegexStringComparator); RegexStringComparator regexStringComparator = (RegexStringComparator) qualifierFilter.getComparator(); - assertEquals("pre*", new String(regexStringComparator.getValue())); + assertEquals("pre*", new String(regexStringComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -282,7 +285,7 @@ public class TestParseFilter { assertTrue(valueFilter.getComparator() instanceof SubstringComparator); SubstringComparator substringComparator = (SubstringComparator) valueFilter.getComparator(); - assertEquals("pre", new String(substringComparator.getValue())); + assertEquals("pre", new String(substringComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -290,8 +293,8 @@ public class TestParseFilter { String filterString = "ColumnRangeFilter('abc', true, 'xyz', false)"; ColumnRangeFilter columnRangeFilter = doTestFilter(filterString, ColumnRangeFilter.class); - assertEquals("abc", new String(columnRangeFilter.getMinColumn())); - assertEquals("xyz", new String(columnRangeFilter.getMaxColumn())); + assertEquals("abc", new String(columnRangeFilter.getMinColumn(), StandardCharsets.UTF_8)); + assertEquals("xyz", new String(columnRangeFilter.getMaxColumn(), StandardCharsets.UTF_8)); 
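Nearly every TestParseFilter hunk around this point applies a single rule: new String(byte[]) and String.getBytes() use the platform default charset, so assertions that round-trip filter prefixes and comparator values through String can change meaning with the JVM's locale settings. Naming the charset, or using HBase's Bytes.toBytes/Bytes.toString helpers as the TestConstraint hunks earlier do, makes the tests portable. A minimal sketch of the symmetric pair, assuming UTF-8 is the intended encoding:

    import java.nio.charset.StandardCharsets;

    public class CharsetExample {
      public static void main(String[] args) {
        byte[] risky = "row 3".getBytes();                        // platform default
        byte[] fixed = "row 3".getBytes(StandardCharsets.UTF_8);  // explicit, portable
        // Decoding has the same pitfall in the other direction:
        System.out.println(new String(fixed, StandardCharsets.UTF_8));
      }
    }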
assertTrue(columnRangeFilter.isMinColumnInclusive()); assertFalse(columnRangeFilter.isMaxColumnInclusive()); } @@ -301,13 +304,14 @@ public class TestParseFilter { String filterString = "DependentColumnFilter('family', 'qualifier', true, =, 'binary:abc')"; DependentColumnFilter dependentColumnFilter = doTestFilter(filterString, DependentColumnFilter.class); - assertEquals("family", new String(dependentColumnFilter.getFamily())); - assertEquals("qualifier", new String(dependentColumnFilter.getQualifier())); + assertEquals("family", new String(dependentColumnFilter.getFamily(), StandardCharsets.UTF_8)); + assertEquals("qualifier", + new String(dependentColumnFilter.getQualifier(), StandardCharsets.UTF_8)); assertTrue(dependentColumnFilter.getDropDependentColumn()); assertEquals(CompareOperator.EQUAL, dependentColumnFilter.getCompareOperator()); assertTrue(dependentColumnFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator)dependentColumnFilter.getComparator(); - assertEquals("abc", new String(binaryComparator.getValue())); + assertEquals("abc", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -316,25 +320,27 @@ public class TestParseFilter { "('family', 'qualifier', >=, 'binary:a', true, false)"; SingleColumnValueFilter singleColumnValueFilter = doTestFilter(filterString, SingleColumnValueFilter.class); - assertEquals("family", new String(singleColumnValueFilter.getFamily())); - assertEquals("qualifier", new String(singleColumnValueFilter.getQualifier())); - assertEquals(singleColumnValueFilter.getCompareOperator(), CompareOperator.GREATER_OR_EQUAL); + assertEquals("family", new String(singleColumnValueFilter.getFamily(), StandardCharsets.UTF_8)); + assertEquals("qualifier", + new String(singleColumnValueFilter.getQualifier(), StandardCharsets.UTF_8)); + assertEquals(CompareOperator.GREATER_OR_EQUAL, singleColumnValueFilter.getCompareOperator()); assertTrue(singleColumnValueFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) singleColumnValueFilter.getComparator(); - assertEquals(new String(binaryComparator.getValue()), "a"); + assertEquals("a", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); assertTrue(singleColumnValueFilter.getFilterIfMissing()); assertFalse(singleColumnValueFilter.getLatestVersionOnly()); filterString = "SingleColumnValueFilter ('family', 'qualifier', >, 'binaryprefix:a')"; singleColumnValueFilter = doTestFilter(filterString, SingleColumnValueFilter.class); - assertEquals("family", new String(singleColumnValueFilter.getFamily())); - assertEquals("qualifier", new String(singleColumnValueFilter.getQualifier())); - assertEquals(singleColumnValueFilter.getCompareOperator(), CompareOperator.GREATER); + assertEquals("family", new String(singleColumnValueFilter.getFamily(), StandardCharsets.UTF_8)); + assertEquals("qualifier", + new String(singleColumnValueFilter.getQualifier(), StandardCharsets.UTF_8)); + assertEquals(CompareOperator.GREATER, singleColumnValueFilter.getCompareOperator()); assertTrue(singleColumnValueFilter.getComparator() instanceof BinaryPrefixComparator); BinaryPrefixComparator binaryPrefixComparator = (BinaryPrefixComparator) singleColumnValueFilter.getComparator(); - assertEquals(new String(binaryPrefixComparator.getValue()), "a"); + assertEquals("a", new String(binaryPrefixComparator.getValue(), StandardCharsets.UTF_8)); assertFalse(singleColumnValueFilter.getFilterIfMissing()); 
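The getCompareOperator() swaps in this SingleColumnValueFilter hunk are the pattern that dominates the whole patch: JUnit's assertEquals and assertNotEquals take the expected value first, and reversing the arguments never changes whether a test passes, only what the failure message claims. A small illustration with a hypothetical value; running it throws the AssertionError described in the comments:

    import static org.junit.Assert.assertEquals;

    public class ExpectedFirstExample {
      public static void main(String[] args) {
        int opened = 3;  // pretend the code under test reported 3 regions
        // The swapped form would fail with "expected:<3> but was:<1>",
        // pointing the reader at the wrong side of the comparison:
        //   assertEquals(opened, 1);
        // Expected-first fails with "expected:<1> but was:<3>",
        // correctly blaming the code under test:
        assertEquals(1, opened);
      }
    }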
assertTrue(singleColumnValueFilter.getLatestVersionOnly()); } @@ -345,10 +351,13 @@ public class TestParseFilter { "SingleColumnValueExcludeFilter ('family', 'qualifier', <, 'binaryprefix:a')"; SingleColumnValueExcludeFilter singleColumnValueExcludeFilter = doTestFilter(filterString, SingleColumnValueExcludeFilter.class); - assertEquals(singleColumnValueExcludeFilter.getCompareOperator(), CompareOperator.LESS); - assertEquals("family", new String(singleColumnValueExcludeFilter.getFamily())); - assertEquals("qualifier", new String(singleColumnValueExcludeFilter.getQualifier())); - assertEquals(new String(singleColumnValueExcludeFilter.getComparator().getValue()), "a"); + assertEquals(CompareOperator.LESS, singleColumnValueExcludeFilter.getCompareOperator()); + assertEquals("family", + new String(singleColumnValueExcludeFilter.getFamily(), StandardCharsets.UTF_8)); + assertEquals("qualifier", + new String(singleColumnValueExcludeFilter.getQualifier(), StandardCharsets.UTF_8)); + assertEquals("a", new String(singleColumnValueExcludeFilter.getComparator().getValue(), + StandardCharsets.UTF_8)); assertFalse(singleColumnValueExcludeFilter.getFilterIfMissing()); assertTrue(singleColumnValueExcludeFilter.getLatestVersionOnly()); @@ -356,14 +365,16 @@ public class TestParseFilter { "('family', 'qualifier', <=, 'binaryprefix:a', true, false)"; singleColumnValueExcludeFilter = doTestFilter(filterString, SingleColumnValueExcludeFilter.class); - assertEquals("family", new String(singleColumnValueExcludeFilter.getFamily())); - assertEquals("qualifier", new String(singleColumnValueExcludeFilter.getQualifier())); - assertEquals(singleColumnValueExcludeFilter.getCompareOperator(), - CompareOperator.LESS_OR_EQUAL); + assertEquals("family", + new String(singleColumnValueExcludeFilter.getFamily(), StandardCharsets.UTF_8)); + assertEquals("qualifier", + new String(singleColumnValueExcludeFilter.getQualifier(), StandardCharsets.UTF_8)); + assertEquals(CompareOperator.LESS_OR_EQUAL, + singleColumnValueExcludeFilter.getCompareOperator()); assertTrue(singleColumnValueExcludeFilter.getComparator() instanceof BinaryPrefixComparator); BinaryPrefixComparator binaryPrefixComparator = (BinaryPrefixComparator) singleColumnValueExcludeFilter.getComparator(); - assertEquals(new String(binaryPrefixComparator.getValue()), "a"); + assertEquals("a", new String(binaryPrefixComparator.getValue(), StandardCharsets.UTF_8)); assertTrue(singleColumnValueExcludeFilter.getFilterIfMissing()); assertFalse(singleColumnValueExcludeFilter.getLatestVersionOnly()); } @@ -379,7 +390,7 @@ public class TestParseFilter { assertEquals(CompareOperator.EQUAL, valueFilter.getCompareOperator()); assertTrue(valueFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) valueFilter.getComparator(); - assertEquals("0", new String(binaryComparator.getValue())); + assertEquals("0", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -393,7 +404,7 @@ public class TestParseFilter { assertEquals(CompareOperator.NOT_EQUAL, rowFilter.getCompareOperator()); assertTrue(rowFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) rowFilter.getComparator(); - assertEquals("row1", new String(binaryComparator.getValue())); + assertEquals("row1", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -407,7 +418,7 @@ public class TestParseFilter { assertTrue(filters.get(1) instanceof FirstKeyOnlyFilter); PrefixFilter 
PrefixFilter = (PrefixFilter) filters.get(0); byte [] prefix = PrefixFilter.getPrefix(); - assertEquals(new String(prefix), "realtime"); + assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); FirstKeyOnlyFilter firstKeyOnlyFilter = (FirstKeyOnlyFilter) filters.get(1); } @@ -420,7 +431,7 @@ public class TestParseFilter { ArrayList filterListFilters = (ArrayList) filterList.getFilters(); assertTrue(filterListFilters.get(0) instanceof FilterList); assertTrue(filterListFilters.get(1) instanceof FamilyFilter); - assertEquals(filterList.getOperator(), FilterList.Operator.MUST_PASS_ONE); + assertEquals(FilterList.Operator.MUST_PASS_ONE, filterList.getOperator()); filterList = (FilterList) filterListFilters.get(0); FamilyFilter familyFilter = (FamilyFilter) filterListFilters.get(1); @@ -428,22 +439,22 @@ public class TestParseFilter { filterListFilters = (ArrayList)filterList.getFilters(); assertTrue(filterListFilters.get(0) instanceof PrefixFilter); assertTrue(filterListFilters.get(1) instanceof QualifierFilter); - assertEquals(filterList.getOperator(), FilterList.Operator.MUST_PASS_ALL); + assertEquals(FilterList.Operator.MUST_PASS_ALL, filterList.getOperator()); assertEquals(CompareOperator.EQUAL, familyFilter.getCompareOperator()); assertTrue(familyFilter.getComparator() instanceof BinaryComparator); BinaryComparator binaryComparator = (BinaryComparator) familyFilter.getComparator(); - assertEquals("qualifier", new String(binaryComparator.getValue())); + assertEquals("qualifier", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); PrefixFilter prefixFilter = (PrefixFilter) filterListFilters.get(0); byte [] prefix = prefixFilter.getPrefix(); - assertEquals(new String(prefix), "realtime"); + assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); QualifierFilter qualifierFilter = (QualifierFilter) filterListFilters.get(1); assertEquals(CompareOperator.GREATER_OR_EQUAL, qualifierFilter.getCompareOperator()); assertTrue(qualifierFilter.getComparator() instanceof BinaryComparator); binaryComparator = (BinaryComparator) qualifierFilter.getComparator(); - assertEquals("e", new String(binaryComparator.getValue())); + assertEquals("e", new String(binaryComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -466,7 +477,7 @@ public class TestParseFilter { ColumnPrefixFilter columnPrefixFilter = (ColumnPrefixFilter) filters.get(0); byte [] columnPrefix = columnPrefixFilter.getPrefix(); - assertEquals(new String(columnPrefix), "realtime"); + assertEquals("realtime", new String(columnPrefix, StandardCharsets.UTF_8)); FirstKeyOnlyFilter firstKeyOnlyFilter = (FirstKeyOnlyFilter) filters.get(1); @@ -477,7 +488,7 @@ public class TestParseFilter { assertTrue(familyFilter.getComparator() instanceof SubstringComparator); SubstringComparator substringComparator = (SubstringComparator) familyFilter.getComparator(); - assertEquals("hihi", new String(substringComparator.getValue())); + assertEquals("hihi", new String(substringComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -497,7 +508,7 @@ public class TestParseFilter { SkipFilter skipFilter = (SkipFilter) filters.get(2); byte [] columnPrefix = columnPrefixFilter.getPrefix(); - assertEquals(new String(columnPrefix), "realtime"); + assertEquals("realtime", new String(columnPrefix, StandardCharsets.UTF_8)); assertTrue(skipFilter.getFilter() instanceof FamilyFilter); FamilyFilter familyFilter = (FamilyFilter) skipFilter.getFilter(); @@ -506,7 +517,7 @@ public class TestParseFilter { 
assertTrue(familyFilter.getComparator() instanceof SubstringComparator); SubstringComparator substringComparator = (SubstringComparator) familyFilter.getComparator(); - assertEquals("hihi", new String(substringComparator.getValue())); + assertEquals("hihi", new String(substringComparator.getValue(), StandardCharsets.UTF_8)); } @Test @@ -537,7 +548,7 @@ public class TestParseFilter { } @Test - public void testIncorrectComparatorType () throws IOException { + public void testIncorrectComparatorType() throws IOException { String filterString = "RowFilter ('>=' , 'binaryoperator:region')"; try { doTestFilter(filterString, RowFilter.class); @@ -584,7 +595,7 @@ public class TestParseFilter { PrefixFilter prefixFilter = (PrefixFilter)filters.get(0); byte [] prefix = prefixFilter.getPrefix(); - assertEquals(new String(prefix), "realtime"); + assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); } @Test @@ -606,41 +617,40 @@ public class TestParseFilter { PrefixFilter prefixFilter = (PrefixFilter)filters.get(0); byte [] prefix = prefixFilter.getPrefix(); - assertEquals(new String(prefix), "realtime"); + assertEquals("realtime", new String(prefix, StandardCharsets.UTF_8)); SkipFilter skipFilter = (SkipFilter)filters.get(1); assertTrue(skipFilter.getFilter() instanceof FirstKeyOnlyFilter); } @Test - public void testUnescapedQuote1 () throws IOException { + public void testUnescapedQuote1() throws IOException { String filterString = "InclusiveStopFilter ('row''3')"; InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); - assertEquals(new String(stopRowKey), "row'3"); + assertEquals("row'3", new String(stopRowKey, StandardCharsets.UTF_8)); } @Test - public void testUnescapedQuote2 () throws IOException { + public void testUnescapedQuote2() throws IOException { String filterString = "InclusiveStopFilter ('row''3''')"; InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); - assertEquals(new String(stopRowKey), "row'3'"); + assertEquals("row'3'", new String(stopRowKey, StandardCharsets.UTF_8)); } @Test - public void testUnescapedQuote3 () throws IOException { + public void testUnescapedQuote3() throws IOException { String filterString = " InclusiveStopFilter ('''')"; - InclusiveStopFilter inclusiveStopFilter = - doTestFilter(filterString, InclusiveStopFilter.class); + InclusiveStopFilter inclusiveStopFilter = doTestFilter(filterString, InclusiveStopFilter.class); byte [] stopRowKey = inclusiveStopFilter.getStopRowKey(); - assertEquals(new String(stopRowKey), "'"); + assertEquals("'", new String(stopRowKey, StandardCharsets.UTF_8)); } @Test - public void testIncorrectFilterString () throws IOException { + public void testIncorrectFilterString() throws IOException { String filterString = "()"; byte [] filterStringAsByteArray = Bytes.toBytes(filterString); try { @@ -652,10 +662,9 @@ public class TestParseFilter { } @Test - public void testCorrectFilterString () throws IOException { + public void testCorrectFilterString() throws IOException { String filterString = "(FirstKeyOnlyFilter())"; - FirstKeyOnlyFilter firstKeyOnlyFilter = - doTestFilter(filterString, FirstKeyOnlyFilter.class); + FirstKeyOnlyFilter firstKeyOnlyFilter = doTestFilter(filterString, FirstKeyOnlyFilter.class); } @Test @@ -665,7 +674,8 @@ public class TestParseFilter { assertTrue(f.getSupportedFilters().contains("MyFilter")); 
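
The many argument swaps in these hunks follow JUnit's assertEquals(expected, actual) contract. A swapped call passes and fails in exactly the same cases, but on failure it prints "expected:&lt;computed value&gt; but was:&lt;literal&gt;", sending the reader in the wrong direction. A short sketch, where parse() is a hypothetical stand-in for the code under test:

import static org.junit.Assert.assertEquals;

public class ExpectedActualOrder {
  public static void main(String[] args) {
    String parsed = parse();
    // Literal first: if parse() ever returned "row2", JUnit would report
    // expected:<row3> but was:<row2>, which matches reality.
    assertEquals("row3", parsed);
  }

  private static String parse() {
    return "row3"; // illustrative only
  }
}
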
} - private <T extends Filter> T doTestFilter(String filterString, Class<T> clazz) throws IOException { + private <T extends Filter> T doTestFilter(String filterString, Class<T> clazz) + throws IOException { byte [] filterStringAsByteArray = Bytes.toBytes(filterString); filter = f.parseFilterString(filterStringAsByteArray); assertEquals(clazz, filter.getClass()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java index 764d0336bf..c6b1b5fec3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestSingleColumnValueExcludeFilter.java @@ -68,7 +68,7 @@ public class TestSingleColumnValueExcludeFilter { filter.filterRowCells(kvs); - assertEquals("resultSize", kvs.size(), 2); + assertEquals("resultSize", 2, kvs.size()); assertTrue("leftKV1", CellComparatorImpl.COMPARATOR.compare(kvs.get(0), c) == 0); assertTrue("leftKV2", CellComparatorImpl.COMPARATOR.compare(kvs.get(1), c) == 0); assertFalse("allRemainingWhenMatch", filter.filterAllRemaining()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java index b24d30bdce..5ba7dfa54a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hdfs.DFSClient; @@ -248,7 +249,7 @@ public class TestBlockReorder { */ @Test() public void testHBaseCluster() throws Exception { - byte[] sb = "sb".getBytes(); + byte[] sb = Bytes.toBytes("sb"); htu.startMiniZKCluster(); MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1); @@ -442,7 +443,7 @@ public class TestBlockReorder { do { l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1); Assert.assertNotNull(l.getLocatedBlocks()); - Assert.assertEquals(l.getLocatedBlocks().size(), 1); + Assert.assertEquals(1, l.getLocatedBlocks().size()); Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length, System.currentTimeMillis() < max); } while (l.get(0).getLocations().length != repCount); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java index 33bac39f25..7747bdb09d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java @@ -125,7 +125,7 @@ public class TestHFileLink { HFileLink.parseBackReferenceName(encodedRegion+"."+ tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); assertEquals(parsedRef.getFirst(), tableName); - assertEquals(parsedRef.getSecond(), encodedRegion); + assertEquals(encodedRegion, parsedRef.getSecond()); //verify resolving back reference Path storeFileDir = new Path(refLinkDir, encodedRegion+"."+ diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 94df090049..b3148c71de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -546,9 +546,9 @@ public class TestHeapSize { assertTrue(ClassSize.OBJECT == 12 || ClassSize.OBJECT == 16); // depending on CompressedOops } if (ClassSize.useUnsafeLayout()) { - assertEquals(ClassSize.OBJECT + 4, ClassSize.ARRAY); + assertEquals(ClassSize.ARRAY, ClassSize.OBJECT + 4); } else { - assertEquals(ClassSize.OBJECT + 8, ClassSize.ARRAY); + assertEquals(ClassSize.ARRAY, ClassSize.OBJECT + 8); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java index f43f147eea..82a50c481d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java @@ -72,16 +72,16 @@ public class TestBufferedDataBlockEncoder { @Test public void testCommonPrefixComparators() { - KeyValue kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); - KeyValue kv2 = new KeyValue(row1, fam_1_2, qual1, 1l, Type.Maximum); + KeyValue kv1 = new KeyValue(row1, fam1, qual1, 1L, Type.Put); + KeyValue kv2 = new KeyValue(row1, fam_1_2, qual1, 1L, Type.Maximum); assertTrue((BufferedDataBlockEncoder.compareCommonFamilyPrefix(kv1, kv2, 4) < 0)); - kv1 = new KeyValue(row1, fam1, qual1, 1l, Type.Put); - kv2 = new KeyValue(row_1_0, fam_1_2, qual1, 1l, Type.Maximum); + kv1 = new KeyValue(row1, fam1, qual1, 1L, Type.Put); + kv2 = new KeyValue(row_1_0, fam_1_2, qual1, 1L, Type.Maximum); assertTrue((BufferedDataBlockEncoder.compareCommonRowPrefix(kv1, kv2, 4) < 0)); - kv1 = new KeyValue(row1, fam1, qual2, 1l, Type.Put); - kv2 = new KeyValue(row1, fam1, qual1, 1l, Type.Maximum); + kv1 = new KeyValue(row1, fam1, qual2, 1L, Type.Put); + kv2 = new KeyValue(row1, fam1, qual1, 1L, Type.Maximum); assertTrue((BufferedDataBlockEncoder.compareCommonQualifierPrefix(kv1, kv2, 4) > 0)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index f41db93820..cbbc9dc9cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -82,7 +82,7 @@ public class TestDataBlockEncoders { static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; private RedundantKVGenerator generator = new RedundantKVGenerator(); - private Random randomizer = new Random(42l); + private Random randomizer = new Random(42L); private final boolean includesMemstoreTS; private final boolean includesTags; @@ -129,14 +129,14 @@ public class TestDataBlockEncoders { byte[] qualifier = new byte[0]; byte[] value = new byte[0]; if (!includesTags) { - kvList.add(new KeyValue(row, family, qualifier, 0l, value)); - kvList.add(new KeyValue(row, family, qualifier, 0l, value)); + kvList.add(new KeyValue(row, family, qualifier, 0L, value)); + kvList.add(new KeyValue(row, family, qualifier, 0L, value)); } else { byte[] metaValue1 = 
Bytes.toBytes("metaValue1"); byte[] metaValue2 = Bytes.toBytes("metaValue2"); - kvList.add(new KeyValue(row, family, qualifier, 0l, value, + kvList.add(new KeyValue(row, family, qualifier, 0L, value, new Tag[] { new ArrayBackedTag((byte) 1, metaValue1) })); - kvList.add(new KeyValue(row, family, qualifier, 0l, value, + kvList.add(new KeyValue(row, family, qualifier, 0L, value, new Tag[] { new ArrayBackedTag((byte) 1, metaValue2) })); } testEncodersOnDataset(kvList, includesMemstoreTS, includesTags); @@ -158,13 +158,13 @@ public class TestDataBlockEncoders { if (includesTags) { byte[] metaValue1 = Bytes.toBytes("metaValue1"); byte[] metaValue2 = Bytes.toBytes("metaValue2"); - kvList.add(new KeyValue(row, family, qualifier, 0l, value, + kvList.add(new KeyValue(row, family, qualifier, 0L, value, new Tag[] { new ArrayBackedTag((byte) 1, metaValue1) })); - kvList.add(new KeyValue(row, family, qualifier, 0l, value, + kvList.add(new KeyValue(row, family, qualifier, 0L, value, new Tag[] { new ArrayBackedTag((byte) 1, metaValue2) })); } else { - kvList.add(new KeyValue(row, family, qualifier, -1l, Type.Put, value)); - kvList.add(new KeyValue(row, family, qualifier, -2l, Type.Put, value)); + kvList.add(new KeyValue(row, family, qualifier, -1L, Type.Put, value)); + kvList.add(new KeyValue(row, family, qualifier, -2L, Type.Put, value)); } testEncodersOnDataset(kvList, includesMemstoreTS, includesTags); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java index e62af9e2a1..d46a5535da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java @@ -61,10 +61,12 @@ public class TestLoadAndSwitchEncodeOnDisk extends conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); } + @Override protected int numKeys() { return 3000; } + @Override @Test(timeout=TIMEOUT_MS) public void loadTest() throws Exception { Admin admin = TEST_UTIL.getAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java index e0d2a9be8b..d304e74bc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java @@ -97,12 +97,12 @@ public class TestSeekBeforeWithReverseScan { while (scanner.next(res)) { count++; } - assertEquals(Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), res.get(0) - .getRowLength()), "b"); - assertEquals(Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), res.get(1) - .getRowLength()), "ab"); - assertEquals(Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), res.get(2) - .getRowLength()), "a"); + assertEquals("b", Bytes.toString(res.get(0).getRowArray(), res.get(0).getRowOffset(), + res.get(0).getRowLength())); + assertEquals("ab", Bytes.toString(res.get(1).getRowArray(), res.get(1).getRowOffset(), + res.get(1).getRowLength())); + assertEquals("a", Bytes.toString(res.get(2).getRowArray(), res.get(2).getRowOffset(), + res.get(2).getRowLength())); assertEquals(3, count); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java index 462f77ae72..4300387a5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java @@ -302,7 +302,7 @@ public class CacheTestUtils { @Override public long heapSize() { - return 4 + buf.length; + return 4L + buf.length; } @Override @@ -373,9 +373,10 @@ public class CacheTestUtils { String strKey; /* No conflicting keys */ - for (strKey = new Long(rand.nextLong()).toString(); !usedStrings - .add(strKey); strKey = new Long(rand.nextLong()).toString()) - ; + strKey = Long.toString(rand.nextLong()); + while (!usedStrings.add(strKey)) { + strKey = Long.toString(rand.nextLong()); + } returnedBlocks[i] = new HFileBlockPair(); returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java index a133cb43b6..aaf1711b3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/NanoTimer.java @@ -98,6 +98,7 @@ public class NanoTimer { * * Note: If timer is never started, "ERR" will be returned. */ + @Override public String toString() { if (!readable()) { return "ERR"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 611c524889..6d3d4abfcf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -297,7 +297,7 @@ public class TestCacheOnWrite { // block we cached at write-time and block read from file should be identical assertEquals(block.getChecksumType(), fromCache.getChecksumType()); assertEquals(block.getBlockType(), fromCache.getBlockType()); - assertNotEquals(block.getBlockType(), BlockType.ENCODED_DATA); + assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType()); assertEquals(block.getOnDiskSizeWithHeader(), fromCache.getOnDiskSizeWithHeader()); assertEquals(block.getOnDiskSizeWithoutHeader(), fromCache.getOnDiskSizeWithoutHeader()); assertEquals( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index 9c36788940..a8b7d1f81f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -124,7 +124,7 @@ public class TestHFileBlock { static int writeTestKeyValues(HFileBlock.Writer hbw, int seed, boolean includesMemstoreTS, boolean useTag) throws IOException { List keyValues = new ArrayList<>(); - Random randomizer = new Random(42l + seed); // just any fixed number + Random randomizer = new Random(42L + seed); // just any fixed number // generate keyValues for (int i = 0; i < NUM_KEYVALUES; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index bec774ee0a..a049b329d6 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -262,7 +262,7 @@ public class TestHFileEncryption { assertTrue("Initial seekTo failed", scanner.seekTo()); for (i = 0; i < 100; i++) { KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size())); - assertEquals("Unable to find KV as expected: " + kv, scanner.seekTo(kv), 0); + assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv)); } } finally { scanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java index af169f5026..8429ee8566 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java @@ -280,10 +280,10 @@ public class TestLruBlockCache { } // A single eviction run should have occurred - assertEquals(cache.getStats().getEvictionCount(), 1); + assertEquals(1, cache.getStats().getEvictionCount()); // We expect two entries evicted - assertEquals(cache.getStats().getEvictedCount(), 2); + assertEquals(2, cache.getStats().getEvictedCount()); // Our expected size overruns acceptable limit assertTrue(expectedCacheSize > diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 9b2602f61c..3873a6ca16 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -93,7 +93,7 @@ public class TestBucketCache { String ioEngineName = "offheap"; String persistencePath = null; - private class MockedBucketCache extends BucketCache { + private static class MockedBucketCache extends BucketCache { public MockedBucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, int writerThreads, int writerQLen, String persistencePath) throws FileNotFoundException, @@ -314,12 +314,18 @@ public class TestBucketCache { BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, persistencePath, 100, conf); - assertEquals(BucketCache.ACCEPT_FACTOR_CONFIG_NAME + " failed to propagate.", cache.getAcceptableFactor(), 0.9f, 0); - assertEquals(BucketCache.MIN_FACTOR_CONFIG_NAME + " failed to propagate.", cache.getMinFactor(), 0.5f, 0); - assertEquals(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME + " failed to propagate.", cache.getExtraFreeFactor(), 0.5f, 0); - assertEquals(BucketCache.SINGLE_FACTOR_CONFIG_NAME + " failed to propagate.", cache.getSingleFactor(), 0.1f, 0); - assertEquals(BucketCache.MULTI_FACTOR_CONFIG_NAME + " failed to propagate.", cache.getMultiFactor(), 0.7f, 0); - assertEquals(BucketCache.MEMORY_FACTOR_CONFIG_NAME + " failed to propagate.", cache.getMemoryFactor(), 0.2f, 0); + assertEquals(BucketCache.ACCEPT_FACTOR_CONFIG_NAME + " failed to propagate.", 0.9f, + cache.getAcceptableFactor(), 0); + assertEquals(BucketCache.MIN_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f, + cache.getMinFactor(), 0); + assertEquals(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f, + cache.getExtraFreeFactor(), 0); + assertEquals(BucketCache.SINGLE_FACTOR_CONFIG_NAME + " failed 
to propagate.", 0.1f, + cache.getSingleFactor(), 0); + assertEquals(BucketCache.MULTI_FACTOR_CONFIG_NAME + " failed to propagate.", 0.7f, + cache.getMultiFactor(), 0); + assertEquals(BucketCache.MEMORY_FACTOR_CONFIG_NAME + " failed to propagate.", 0.2f, + cache.getMemoryFactor(), 0); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java index 09429ddf3d..0a168ba2c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java @@ -148,7 +148,7 @@ public class TestNettyIPC extends AbstractTestIPC { super(server, name, services, bindAddress, conf, scheduler, true); } - final class FailingConnection extends NettyServerRpcConnection { + static final class FailingConnection extends NettyServerRpcConnection { private FailingConnection(TestFailingRpcServer rpcServer, Channel channel) { super(rpcServer, channel); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java index 83a7acca8e..f6f6fc539e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java @@ -112,7 +112,7 @@ public class TestProtoBufRpc { // Test echo method EchoRequestProto echoRequest = EchoRequestProto.newBuilder().setMessage("hello").build(); EchoResponseProto echoResponse = stub.echo(null, echoRequest); - assertEquals(echoResponse.getMessage(), "hello"); + assertEquals("hello", echoResponse.getMessage()); stub.error(null, emptyRequest); fail("Expected exception is not thrown"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java index 9d2fd91c18..9e70c93d6e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientLeaks.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1; import static org.junit.Assert.assertTrue; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; - import java.io.IOException; import java.net.Socket; import java.net.SocketAddress; @@ -39,6 +37,7 @@ import org.apache.hadoop.hbase.client.MetricsConnection; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -50,6 +49,8 @@ import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; + @Category(MediumTests.class) public class TestRpcClientLeaks { @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
@@ -112,7 +113,7 @@ public class TestRpcClientLeaks { conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(TableName.valueOf(name.getMethodName())); - table.get(new Get("asd".getBytes())); + table.get(new Get(Bytes.toBytes("asd"))); connection.close(); for (Socket socket : MyRpcClientImpl.savedSockets) { assertTrue("Socket + " + socket + " is not closed", socket.isClosed()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index e4a8767ab0..e646c14714 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -166,7 +166,8 @@ public class TestSimpleRpcScheduler { for (String callQueueName:callQueueInfo.getCallQueueNames()) { for (String calledMethod: callQueueInfo.getCalledMethodNames(callQueueName)) { - assertEquals(callQueueInfo.getCallMethodCount(callQueueName, calledMethod), totalCallMethods); + assertEquals(totalCallMethods, + callQueueInfo.getCallMethodCount(callQueueName, calledMethod)); } } @@ -327,7 +328,7 @@ public class TestSimpleRpcScheduler { RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 2, 1, 1, priority, HConstants.QOS_THRESHOLD); - assertNotEquals(scheduler, null); + assertNotEquals(null, scheduler); } @Test @@ -571,6 +572,7 @@ public class TestSimpleRpcScheduler { }; CallRunner cr = new CallRunner(null, putCall) { + @Override public void run() { if (sleepTime <= 0) return; try { @@ -581,10 +583,12 @@ public class TestSimpleRpcScheduler { } } + @Override public RpcCall getRpcCall() { return putCall; } + @Override public void drop() { } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java index b080d7f426..6b6f0de1e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java @@ -77,6 +77,7 @@ abstract public class MapreduceTestingShim { } private static class MapreduceV1Shim extends MapreduceTestingShim { + @Override public JobContext newJobContext(Configuration jobConf) throws IOException { // Implementing: // return new JobContext(jobConf, new JobID()); @@ -105,6 +106,7 @@ abstract public class MapreduceTestingShim { } } + @Override public JobConf obtainJobConf(MiniMRCluster cluster) { if (cluster == null) return null; try { @@ -129,6 +131,7 @@ abstract public class MapreduceTestingShim { }; private static class MapreduceV2Shim extends MapreduceTestingShim { + @Override public JobContext newJobContext(Configuration jobConf) { return newJob(jobConf); } @@ -147,6 +150,7 @@ abstract public class MapreduceTestingShim { } } + @Override public JobConf obtainJobConf(MiniMRCluster cluster) { try { Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index 16f98a0a83..53e80f3dba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -365,10 +365,8 @@ public class TestAssignmentListener { drainingServerTracker.start(); // Confirm our ServerManager lists are empty. - Assert.assertEquals(serverManager.getOnlineServers(), - new HashMap()); - Assert.assertEquals(serverManager.getDrainingServersList(), - new ArrayList()); + Assert.assertEquals(new HashMap(), serverManager.getOnlineServers()); + Assert.assertEquals(new ArrayList(), serverManager.getDrainingServersList()); // checkAndRecordNewServer() is how servers are added to the ServerManager. ArrayList onlineDrainingServers = new ArrayList<>(); @@ -381,8 +379,7 @@ public class TestAssignmentListener { } // Verify the ServerManager lists are correctly updated. - Assert.assertEquals(serverManager.getOnlineServers(), onlineServers); - Assert.assertEquals(serverManager.getDrainingServersList(), - onlineDrainingServers); + Assert.assertEquals(onlineServers, serverManager.getOnlineServers()); + Assert.assertEquals(onlineDrainingServers, serverManager.getDrainingServersList()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index e8aa755b80..aa5ad1efab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -190,7 +190,7 @@ public class TestMasterFailover { RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper()); assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName); - assertEquals("hbase:meta should be online on RS", metaState.getState(), State.OPEN); + assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState()); // Start up a new master LOG.info("Starting up a new master"); @@ -203,7 +203,7 @@ public class TestMasterFailover { metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper()); assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName); - assertEquals("hbase:meta should be online on RS", metaState.getState(), State.OPEN); + assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState()); // Done, shutdown the cluster } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index fd44c891b8..15c8b6a82f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -189,6 +189,7 @@ public class TestMasterNoCluster { TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO); HMaster master = new HMaster(conf) { + @Override InetAddress getRemoteInetAddress(final int port, final long serverStartCode) throws UnknownHostException { // Return different address dependent on port passed. 
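
The TestAssignmentListener change also leans on the java.util collections' contents-based equals: any empty Map equals any other empty Map regardless of implementation class, so a fresh HashMap or ArrayList is a valid "expected" value. A small sketch of that property (types are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CollectionEqualsSketch {
  public static void main(String[] args) {
    Map<String, Integer> online = new HashMap<>(); // stand-in for getOnlineServers()
    List<String> draining = new ArrayList<>();     // stand-in for getDrainingServersList()
    // Map.equals and List.equals compare contents, not implementation classes.
    System.out.println(new HashMap<String, Integer>().equals(online)); // true
    System.out.println(new ArrayList<String>().equals(draining));      // true
  }
}
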
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index cd7af506ce..e399f2eacd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.EnumSet; @@ -242,7 +243,7 @@ public class TestMasterOperationsForRegionReplicas { } assert(defaultReplicas.size() == numRegions); Collection counts = new HashSet<>(defaultReplicas.values()); - assert(counts.size() == 1 && counts.contains(new Integer(numReplica))); + assert(counts.size() == 1 && counts.contains(numReplica)); } finally { ADMIN.disableTable(tableName); ADMIN.deleteTable(tableName); @@ -336,7 +337,7 @@ public class TestMasterOperationsForRegionReplicas { byte[] startKey = region.getStartKey(); if (region.getTable().equals(table)) { setOfStartKeys.add(startKey); //ignore other tables - LOG.info("--STARTKEY " + new String(startKey)+"--"); + LOG.info("--STARTKEY {}--", new String(startKey, StandardCharsets.UTF_8)); } } // the number of startkeys will be equal to the number of regions hosted in each server diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java index 29c24f77b5..648ea643bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java @@ -121,6 +121,7 @@ public class TestMasterShutdown { master.start(); LOG.info("Called master start on " + master.getName()); Thread shutdownThread = new Thread("Shutdown-Thread") { + @Override public void run() { LOG.info("Before call to shutdown master"); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java index ebfec22098..2e6c6995a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java @@ -88,8 +88,7 @@ public class TestMetaShutdownHandler { } RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", - metaState.getState(), RegionState.State.OPEN); + assertEquals("Meta should be not in transition", RegionState.State.OPEN, metaState.getState()); assertNotEquals("Meta should be moved off master", metaServerName, master.getServerName()); @@ -115,8 +114,7 @@ public class TestMetaShutdownHandler { regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); // Now, make sure meta is registered in zk metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", - metaState.getState(), RegionState.State.OPEN); + assertEquals("Meta should be not in transition", RegionState.State.OPEN, metaState.getState()); assertEquals("Meta should be assigned", metaState.getServerName(), 
regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO)); assertNotEquals("Meta should be assigned on a different server", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index e99d533e18..279495282e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -389,8 +389,7 @@ public class TestRegionPlacement { lastRegionOpenedCount = currentRegionOpened; assertEquals("There are only " + regionMovement + " instead of " - + expected + " region movement for " + attempt + " attempts", - regionMovement, expected); + + expected + " region movement for " + attempt + " attempts", expected, regionMovement); } /** @@ -469,6 +468,7 @@ public class TestRegionPlacement { final AtomicInteger totalRegionNum = new AtomicInteger(0); LOG.info("The start of region placement verification"); MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { + @Override public boolean visit(Result result) throws IOException { try { @SuppressWarnings("deprecation") diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index cd5239edbe..128d7ee444 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -384,23 +384,23 @@ public class TestSplitLogManager { @Test (timeout=180000) public void testTaskResigned() throws Exception { LOG.info("TestTaskResigned - resubmit task node once in RESIGNED state"); - assertEquals(tot_mgr_resubmit.sum(), 0); + assertEquals(0, tot_mgr_resubmit.sum()); slm = new SplitLogManager(master, conf); - assertEquals(tot_mgr_resubmit.sum(), 0); + assertEquals(0, tot_mgr_resubmit.sum()); TaskBatch batch = new TaskBatch(); String tasknode = submitTaskAndWait(batch, "foo/1"); - assertEquals(tot_mgr_resubmit.sum(), 0); + assertEquals(0, tot_mgr_resubmit.sum()); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); - assertEquals(tot_mgr_resubmit.sum(), 0); + assertEquals(0, tot_mgr_resubmit.sum()); SplitLogTask slt = new SplitLogTask.Resigned(worker1); - assertEquals(tot_mgr_resubmit.sum(), 0); + assertEquals(0, tot_mgr_resubmit.sum()); ZKUtil.setData(zkw, tasknode, slt.toByteArray()); ZKUtil.checkExists(zkw, tasknode); // Could be small race here. 
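
Related to the numeric-literal cleanups earlier in the patch (42l becoming 42L, and 4 + buf.length becoming 4L + buf.length in a method returning long): a lowercase l suffix is easily misread as the digit 1, and int arithmetic that is widened only after the addition can overflow first. A sketch of the overflow case (values are illustrative):

public class LongLiteralSketch {
  public static void main(String[] args) {
    int length = Integer.MAX_VALUE; // stand-in for a large buffer length
    long overflowed = 4 + length;   // int addition wraps, then widens to long
    long correct = 4L + length;     // a long operand forces 64-bit addition
    System.out.println(overflowed); // -2147483645
    System.out.println(correct);    // 2147483651
  }
}
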
if (tot_mgr_resubmit.sum() == 0) { waitForCounter(tot_mgr_resubmit, 0, 1, to/2); } - assertEquals(tot_mgr_resubmit.sum(), 1); + assertEquals(1, tot_mgr_resubmit.sum()); byte[] taskstate = ZKUtil.getData(zkw, tasknode); slt = SplitLogTask.parseFrom(taskstate); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java index 1f61ee7d34..5a75297391 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java @@ -65,9 +65,8 @@ public class TestTableStateManager { TEST_UTIL.restartHBaseCluster(1); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - Assert.assertEquals( - master.getTableStateManager().getTableState(tableName), - TableState.State.DISABLED); + Assert.assertEquals(TableState.State.DISABLED, + master.getTableStateManager().getTableState(tableName)); } private void setTableStateInZK(ZKWatcher watcher, final TableName tableName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index 83fafff970..9bd4443d52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -107,10 +107,12 @@ public class MockMasterServices extends MockNoopMasterServices { this.walManager = new MasterWalManager(this); // Mock an AM. this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this)) { + @Override public boolean isTableEnabled(final TableName tableName) { return true; } + @Override public boolean isTableDisabled(final TableName tableName) { return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java index 37d982082e..d2a4020cc9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java @@ -501,8 +501,8 @@ public class TestSplitTableRegionProcedure { daughters.get(i), startRow, numRows, - ColumnFamilyName1.getBytes(), - ColumnFamilyName2.getBytes()); + Bytes.toBytes(ColumnFamilyName1), + Bytes.toBytes(ColumnFamilyName2)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index adf56b8fe8..e180fb516d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -159,6 +159,7 @@ public class BalancerTestBase { public MockMapping(Configuration conf) { } + @Override public List resolve(List names) { List ret = new ArrayList<>(names.size()); for (String name : names) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 08b27ec9c4..644de6ad9f 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -392,6 +392,7 @@ public class TestLogsCleaner { .when(zk).getData("/hbase/replication/rs", null, new Stat()); } + @Override public RecoverableZooKeeper getRecoverableZooKeeper() { return zk; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index 29487014b9..85f0d1fb98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -351,6 +351,7 @@ public class TestReplicationHFileCleaner { .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat()); } + @Override public RecoverableZooKeeper getRecoverableZooKeeper() { return zk; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index 6069041a4e..6a71df3ecd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -183,7 +183,7 @@ public class TestSnapshotFromMaster { DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class); Mockito.when(mockHandler.getException()).thenReturn(null); Mockito.when(mockHandler.getSnapshot()).thenReturn(desc); - Mockito.when(mockHandler.isFinished()).thenReturn(new Boolean(true)); + Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE); Mockito.when(mockHandler.getCompletionTimestamp()) .thenReturn(EnvironmentEdgeManager.currentTime()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java index 7f031cced6..1f743db7b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; import org.hamcrest.core.IsInstanceOf; import org.hamcrest.core.StringStartsWith; import org.junit.After; @@ -60,6 +61,7 @@ import org.junit.rules.TestName; import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; @@ -105,8 +107,10 @@ public class TestLockProcedure { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(1); UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); - UTIL.createTable(tableName1, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()}); - UTIL.createTable(tableName2, 
new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()}); + UTIL.createTable(tableName1, + new byte[][]{ Bytes.toBytes("fam")}, new byte[][] {Bytes.toBytes("1")}); + UTIL.createTable(tableName2, + new byte[][]{Bytes.toBytes("fam")}, new byte[][] {Bytes.toBytes("1")}); masterRpcService = UTIL.getHBaseCluster().getMaster().getMasterRpcServices(); procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor(); tableRegions1 = UTIL.getAdmin().getRegions(tableName1); @@ -195,7 +199,7 @@ public class TestLockProcedure { LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null, LockHeartbeatRequest.newBuilder().setProcId(procId).build()); if (response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { - assertEquals(response.getTimeoutMs(), HEARTBEAT_TIMEOUT); + assertEquals(HEARTBEAT_TIMEOUT, response.getTimeoutMs()); LOG.debug(String.format("Proc id %s acquired lock.", procId)); return true; } @@ -349,7 +353,8 @@ public class TestLockProcedure { CountDownLatch latch = new CountDownLatch(1); // MasterRpcServices don't set latch with LockProcedure, so create one and submit it directly. LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(), - TableName.valueOf("table"), org.apache.hadoop.hbase.procedure2.LockType.EXCLUSIVE, "desc", latch); + TableName.valueOf("table"), + org.apache.hadoop.hbase.procedure2.LockType.EXCLUSIVE, "desc", latch); procExec.submitProcedure(lockProc); assertTrue(latch.await(2000, TimeUnit.MILLISECONDS)); releaseLock(lockProc.getProcId()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java index 21d914ae23..e5d3a7944d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java @@ -102,6 +102,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase this.hri = hri; } + @Override public Procedure newProcedure(long procId) { return new RegionProcedure(procId, hri); } @@ -132,6 +133,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase this.tableName = tableName; } + @Override public Procedure newProcedure(long procId) { return new TableProcedure(procId, tableName); } @@ -196,6 +198,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase private final AtomicLong completed = new AtomicLong(0); private class AddProcsWorker extends Thread { + @Override public void run() { final Random rand = new Random(System.currentTimeMillis()); long procId = procIds.incrementAndGet(); @@ -209,6 +212,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase } private class PollAndLockWorker extends Thread { + @Override public void run() { while (completed.get() < numOps) { // With lock/unlock being ~100ns, and no other workload, 1000ns wait seams reasonable. 
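
The TestLockProcedure hunk above swaps "fam".getBytes() for Bytes.toBytes("fam") for the same charset reason: the no-argument String.getBytes() encodes with the platform default, while HBase's Bytes.toBytes(String) always encodes UTF-8, so keys and family names built in tests are identical on every machine. A sketch of the equivalence (ASCII happens to match under most platform defaults; non-ASCII input would not):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.hbase.util.Bytes;

public class EncodeSketch {
  public static void main(String[] args) {
    byte[] viaBytes = Bytes.toBytes("fam");                     // always UTF-8
    byte[] viaCharset = "fam".getBytes(StandardCharsets.UTF_8); // explicit UTF-8
    System.out.println(Arrays.equals(viaBytes, viaCharset));    // true
  }
}
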
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java index 8dec59d0e8..4adab53d00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java @@ -99,7 +99,7 @@ public class TestModifyNamespaceProcedure { // Before modify NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); - assertEquals(currentNsDescriptor.getConfigurationValue(nsKey1), nsValue1before); + assertEquals(nsValue1before, currentNsDescriptor.getConfigurationValue(nsKey1)); assertNull(currentNsDescriptor.getConfigurationValue(nsKey2)); // Update @@ -115,8 +115,8 @@ public class TestModifyNamespaceProcedure { // Verify the namespace is updated. currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); - assertEquals(nsd.getConfigurationValue(nsKey1), nsValue1after); - assertEquals(currentNsDescriptor.getConfigurationValue(nsKey2), nsValue2); + assertEquals(nsValue1after, nsd.getConfigurationValue(nsKey1)); + assertEquals(nsValue2, currentNsDescriptor.getConfigurationValue(nsKey2)); } @Test(timeout=60000) @@ -219,7 +219,7 @@ public class TestModifyNamespaceProcedure { // Validate NamespaceDescriptor currentNsDescriptor = UTIL.getAdmin().getNamespaceDescriptor(nsd.getName()); - assertEquals(currentNsDescriptor.getConfigurationValue(nsKey), nsValue); + assertEquals(nsValue, currentNsDescriptor.getConfigurationValue(nsKey)); } @Test(timeout = 60000) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index 8b58646261..24a6bc5d1b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -103,7 +104,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); assertEquals(2, currentHtd.getFamiliesKeys().size()); - assertTrue(currentHtd.hasFamily(cf2.getBytes())); + assertTrue(currentHtd.hasFamily(Bytes.toBytes(cf2))); // Test 2: Modify the table descriptor offline UTIL.getAdmin().disableTable(tableName); @@ -119,7 +120,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); - assertTrue(currentHtd.hasFamily(cf3.getBytes())); + assertTrue(currentHtd.hasFamily(Bytes.toBytes(cf3))); assertEquals(3, currentHtd.getFamiliesKeys().size()); } @@ -137,7 +138,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { // Test 1: Modify the table descriptor HTableDescriptor htd = new 
HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName)); - htd.removeFamily(cf2.getBytes()); + htd.removeFamily(Bytes.toBytes(cf2)); long procId = ProcedureTestingUtility.submitAndWait( procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); @@ -145,7 +146,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); assertEquals(2, currentHtd.getFamiliesKeys().size()); - assertFalse(currentHtd.hasFamily(cf2.getBytes())); + assertFalse(currentHtd.hasFamily(Bytes.toBytes(cf2))); // Test 2: Modify the table descriptor offline UTIL.getAdmin().disableTable(tableName); @@ -153,7 +154,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { HTableDescriptor htd2 = new HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName)); - htd2.removeFamily(cf3.getBytes()); + htd2.removeFamily(Bytes.toBytes(cf3)); // Disable Sanity check htd2.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString()); @@ -164,12 +165,12 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); assertEquals(1, currentHtd.getFamiliesKeys().size()); - assertFalse(currentHtd.hasFamily(cf3.getBytes())); + assertFalse(currentHtd.hasFamily(Bytes.toBytes(cf3))); //Removing the last family will fail HTableDescriptor htd3 = new HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName)); - htd3.removeFamily(cf1.getBytes()); + htd3.removeFamily(Bytes.toBytes(cf1)); long procId3 = ProcedureTestingUtility.submitAndWait(procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd3)); @@ -179,7 +180,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { assertTrue("expected DoNotRetryIOException, got " + cause, cause instanceof DoNotRetryIOException); assertEquals(1, currentHtd.getFamiliesKeys().size()); - assertTrue(currentHtd.hasFamily(cf1.getBytes())); + assertTrue(currentHtd.hasFamily(Bytes.toBytes(cf1))); } @Test(timeout=60000) @@ -202,7 +203,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { boolean newCompactionEnableOption = htd.isCompactionEnabled() ? false : true; htd.setCompactionEnabled(newCompactionEnableOption); htd.addFamily(new HColumnDescriptor(cf2)); - htd.removeFamily(cf3.getBytes()); + htd.removeFamily(Bytes.toBytes(cf3)); htd.setRegionReplication(3); // Start the Modify procedure && kill the executor @@ -240,7 +241,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { boolean newCompactionEnableOption = htd.isCompactionEnabled() ? 
false : true; htd.setCompactionEnabled(newCompactionEnableOption); htd.addFamily(new HColumnDescriptor(cf2)); - htd.removeFamily(cf3.getBytes()); + htd.removeFamily(Bytes.toBytes(cf3)); // Start the Modify procedure && kill the executor long procId = procExec.submitProcedure( @@ -253,8 +254,8 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled()); assertEquals(2, currentHtd.getFamiliesKeys().size()); - assertTrue(currentHtd.hasFamily(cf2.getBytes())); - assertFalse(currentHtd.hasFamily(cf3.getBytes())); + assertTrue(currentHtd.hasFamily(Bytes.toBytes(cf2))); + assertFalse(currentHtd.hasFamily(Bytes.toBytes(cf3))); // cf2 should be added cf3 should be removed MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java index 2834b8f404..2140d5eaf8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestWALProcedureStoreOnHDFS.java @@ -135,7 +135,7 @@ public class TestWALProcedureStoreOnHDFS { final AtomicInteger reCount = new AtomicInteger(0); Thread[] thread = new Thread[store.getNumThreads() * 2 + 1]; for (int i = 0; i < thread.length; ++i) { - final long procId = i + 1; + final long procId = i + 1L; thread[i] = new Thread(() -> { try { LOG.debug("[S] INSERT " + procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java index 60c9c4b71b..382625c482 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java @@ -203,6 +203,7 @@ public class TestSnapshotFileCache { } class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { + @Override public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException { Collection<String> files = new HashSet<>(); files.addAll(SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotDir)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java index ad6c58e4eb..8f81946753 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java @@ -114,7 +114,8 @@ public class TestSnapshotHFileCleaner { assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile))); } - class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { + static class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector { + @Override public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException { Collection<String> files = new HashSet<>(); files.addAll(SnapshotReferenceUtil.getHFileNames(TEST_UTIL.getConfiguration(), fs, snapshotDir)); diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java index 32e65220d8..65f50c9f7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hbase.mob; -import junit.framework.TestCase; +import static org.junit.Assert.assertEquals; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -34,13 +34,15 @@ import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assert; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Category(SmallTests.class) -public class TestCachedMobFile extends TestCase{ +public class TestCachedMobFile { static final Logger LOG = LoggerFactory.getLogger(TestCachedMobFile.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private Configuration conf = TEST_UTIL.getConfiguration(); @@ -50,10 +52,12 @@ public class TestCachedMobFile extends TestCase{ private static final long EXPECTED_REFERENCE_ZERO = 0; private static final long EXPECTED_REFERENCE_ONE = 1; private static final long EXPECTED_REFERENCE_TWO = 2; + @Rule + public TestName testName = new TestName(); @Test public void testOpenClose() throws Exception { - String caseName = getName(); + String caseName = testName.getMethodName(); Path testDir = TEST_UTIL.getDataTestDir(); FileSystem fs = testDir.getFileSystem(conf); HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build(); @@ -61,21 +65,21 @@ public class TestCachedMobFile extends TestCase{ .withOutputDir(testDir).withFileContext(meta).build(); MobTestUtil.writeStoreFile(writer, caseName); CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); - Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); + assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); cachedMobFile.open(); - Assert.assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount()); + assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount()); cachedMobFile.open(); - Assert.assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile.getReferenceCount()); + assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile.getReferenceCount()); cachedMobFile.close(); - Assert.assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount()); + assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount()); cachedMobFile.close(); - Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); + assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); } @SuppressWarnings("SelfComparison") @Test public void testCompare() throws Exception { - String caseName = getName(); + String caseName = testName.getMethodName(); Path testDir = TEST_UTIL.getDataTestDir(); FileSystem fs = testDir.getFileSystem(conf); Path outputDir1 = new Path(testDir, FAMILY1); @@ -86,16 +90,16 @@ public class TestCachedMobFile extends TestCase{ CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf); Path outputDir2 = new Path(testDir, FAMILY2); 
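The TestCachedMobFile conversion above is the standard JUnit 3 to JUnit 4 migration: drop extends TestCase, keep @Test annotations, and replace TestCase.getName() with the TestName rule, which is JUnit 4's way of reading the running test method's name. A minimal self-contained sketch of the same pattern (the class and method names here are hypothetical, not from the patch):

    import static org.junit.Assert.assertEquals;

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TestName;

    public class ExampleMigratedTest { // formerly: extends TestCase
      // JUnit 4 substitute for TestCase.getName(): the rule records the
      // name of the test method currently executing.
      @Rule
      public TestName testName = new TestName();

      @Test
      public void testOpenClose() {
        // was: String caseName = getName();
        String caseName = testName.getMethodName();
        assertEquals("testOpenClose", caseName);
      }
    }
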
StoreFileWriter writer2 = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(outputDir2) - .withFileContext(meta) - .build(); + .withOutputDir(outputDir2) + .withFileContext(meta) + .build(); MobTestUtil.writeStoreFile(writer2, caseName); CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf); cachedMobFile1.access(1); cachedMobFile2.access(2); - Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile2), 1); - Assert.assertEquals(cachedMobFile2.compareTo(cachedMobFile1), -1); - Assert.assertEquals(cachedMobFile1.compareTo(cachedMobFile1), 0); + assertEquals(1, cachedMobFile1.compareTo(cachedMobFile2)); + assertEquals(-1, cachedMobFile2.compareTo(cachedMobFile1)); + assertEquals(0, cachedMobFile1.compareTo(cachedMobFile1)); } @Test @@ -105,7 +109,7 @@ public class TestCachedMobFile extends TestCase{ HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(testDir).withFileContext(meta).build(); - String caseName = getName(); + String caseName = testName.getMethodName(); MobTestUtil.writeStoreFile(writer, caseName); CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); byte[] family = Bytes.toBytes(caseName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java index f894fb2511..cc2aa5cef2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java @@ -18,7 +18,9 @@ */ package org.apache.hadoop.hbase.mob; -import junit.framework.TestCase; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -36,17 +38,21 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Category(SmallTests.class) -public class TestMobFile extends TestCase { +public class TestMobFile { static final Logger LOG = LoggerFactory.getLogger(TestMobFile.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private Configuration conf = TEST_UTIL.getConfiguration(); private CacheConfig cacheConf = new CacheConfig(conf); + @Rule + public TestName testName = new TestName(); @Test public void testReadKeyValue() throws Exception { @@ -57,7 +63,7 @@ public class TestMobFile extends TestCase { .withOutputDir(testDir) .withFileContext(meta) .build(); - String caseName = getName(); + String caseName = testName.getMethodName(); MobTestUtil.writeStoreFile(writer, caseName); MobFile mobFile = @@ -110,7 +116,7 @@ public class TestMobFile extends TestCase { .withOutputDir(testDir) .withFileContext(meta) .build(); - MobTestUtil.writeStoreFile(writer, getName()); + MobTestUtil.writeStoreFile(writer, testName.getMethodName()); MobFile mobFile = new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true)); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java index 42e652867d..5077728e1a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java @@ -18,11 +18,12 @@ */ package org.apache.hadoop.hbase.mob; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + import java.io.IOException; import java.util.Date; -import junit.framework.TestCase; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -38,13 +39,15 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Category(SmallTests.class) -public class TestMobFileCache extends TestCase { +public class TestMobFileCache { static final Logger LOG = LoggerFactory.getLogger(TestMobFileCache.class); private HBaseTestingUtility UTIL; private HRegion region; @@ -73,7 +76,7 @@ public class TestMobFileCache extends TestCase { private static final byte[] QF2 = Bytes.toBytes("qf2"); private static final byte[] QF3 = Bytes.toBytes("qf3"); - @Override + @Before public void setUp() throws Exception { UTIL = HBaseTestingUtility.createLocalHTU(); conf = UTIL.getConfiguration(); @@ -93,8 +96,8 @@ public class TestMobFileCache extends TestCase { region = UTIL.createLocalHRegion(htd, null, null); } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { region.close(); region.getFilesystem().delete(UTIL.getDataTestDir(), true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java index 8478e20903..5ca73a6d4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileName.java @@ -18,26 +18,31 @@ */ package org.apache.hadoop.hbase.mob; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertTrue; + import java.util.Date; import java.util.Random; import java.util.UUID; -import junit.framework.TestCase; - import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.MD5Hash; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @Category(SmallTests.class) -public class TestMobFileName extends TestCase { +public class TestMobFileName { private String uuid; private Date date; private String dateStr; private byte[] startKey; + @Before public void setUp() { Random random = new Random(); uuid = UUID.randomUUID().toString().replaceAll("-", ""); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index 94680f2c80..214fe4970a 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -146,7 +146,7 @@ public class TestNamespaceAuditor { .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); - assertEquals(ADMIN.listNamespaceDescriptors().length, 3); + assertEquals(3, ADMIN.listNamespaceDescriptors().length); HColumnDescriptor fam1 = new HColumnDescriptor("fam1"); HTableDescriptor tableDescOne = @@ -592,7 +592,7 @@ public class TestNamespaceAuditor { .build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); - assertEquals(ADMIN.listNamespaceDescriptors().length, 3); + assertEquals(3, ADMIN.listNamespaceDescriptors().length); HColumnDescriptor fam1 = new HColumnDescriptor("fam1"); HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java index 0603b21446..5ceb8e6fd4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedure.java @@ -52,7 +52,7 @@ public class TestProcedure { when(coord.getRpcs()).thenReturn(comms); // make it not null } - class LatchedProcedure extends Procedure { + static class LatchedProcedure extends Procedure { CountDownLatch startedAcquireBarrier = new CountDownLatch(1); CountDownLatch startedDuringBarrier = new CountDownLatch(1); CountDownLatch completedProcedure = new CountDownLatch(1); @@ -93,6 +93,7 @@ public class TestProcedure { final LatchedProcedure procspy = spy(proc); // coordinator: start the barrier procedure new Thread() { + @Override public void run() { procspy.call(); } @@ -139,6 +140,7 @@ public class TestProcedure { final LatchedProcedure procspy = spy(proc); // start the barrier procedure new Thread() { + @Override public void run() { procspy.call(); } @@ -192,6 +194,7 @@ public class TestProcedure { // start the barrier procedure Thread t = new Thread() { + @Override public void run() { procspy.call(); } @@ -214,6 +217,7 @@ public class TestProcedure { // start the barrier procedure Thread t = new Thread() { + @Override public void run() { procspy.call(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java index 1678a50ee9..e2b1a79669 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java @@ -213,6 +213,7 @@ public class TestProcedureCoordinator { final Procedure spy = spy(task); AcquireBarrierAnswer prepare = new AcquireBarrierAnswer(procName, cohort) { + @Override public void doWork() { // then do some fun where we commit before all nodes have prepared // "one" commits before anyone else is done @@ -288,7 +289,7 @@ public class TestProcedureCoordinator { inorder.verify(controller).sendGlobalBarrierReached(eq(task), anyListOf(String.class)); } - private abstract class OperationAnswer implements Answer { + private static 
abstract class OperationAnswer implements Answer { private boolean ran = false; public void ensureRan() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java index 1f5cbe6db3..c1398626e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java @@ -244,7 +244,7 @@ public class TestZKProcedure { Subprocedure r = ((Subprocedure) invocation.getMock()); LOG.error("Remote commit failure, not propagating error:" + remoteCause); comms.receiveAbortProcedure(r.getName(), remoteCause); - assertEquals(r.isComplete(), true); + assertEquals(true, r.isComplete()); // don't complete the error phase until the coordinator has gotten the error // notification (which ensures that we never progress past prepare) try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java index a44ad74f50..ce0d2f21a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaStatusRPCs.java @@ -142,7 +142,7 @@ public class TestQuotaStatusRPCs { assertTrue( "Observed table usage was " + snapshot.getUsage(), snapshot.getUsage() >= tableSize); - assertEquals(snapshot.getLimit(), sizeLimit); + assertEquals(sizeLimit, snapshot.getLimit()); SpaceQuotaStatus pbStatus = snapshot.getQuotaStatus(); assertFalse(pbStatus.isInViolation()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java index 300268f087..9ecde78b93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSuperUserQuotaPermissions.java @@ -267,6 +267,7 @@ public class TestSuperUserQuotaPermissions { private T doAsUser(UserGroupInformation ugi, Callable task) throws Exception { return ugi.doAs(new PrivilegedExceptionAction() { + @Override public T run() throws Exception { return task.call(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index 82e17555bb..57948b63e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -51,7 +51,7 @@ public class EncodedSeekPerformanceTest { /** Use this benchmark with default options */ public EncodedSeekPerformanceTest() { configuration.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.5f); - randomizer = new Random(42l); + randomizer = new Random(42L); numberOfSeeks = DEFAULT_NUMBER_OF_SEEKS; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java index e1596787a5..8199b07344 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java @@ -111,11 +111,13 @@ public class MockHStoreFile extends HStoreFile { this.entryCount = entryCount; } + @Override public OptionalLong getMinimumTimestamp() { return timeRangeTracker == null ? OptionalLong.empty() : OptionalLong.of(timeRangeTracker.getMin()); } + @Override public OptionalLong getMaximumTimestamp() { return timeRangeTracker == null ? OptionalLong.empty() : OptionalLong.of(timeRangeTracker.getMax()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java index 5af7d9657e..f89be4325d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java @@ -46,6 +46,7 @@ public class StatefulStoreMockMaker { return 0; } private class CancelAnswer implements Answer { + @Override public CompactionContext answer(InvocationOnMock invocation) throws Throwable { cancelCompaction(invocation.getArgument(0)); return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java index 774888c642..c8a75f03c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java @@ -364,9 +364,9 @@ public class TestAtomicOperation { assertEquals(0, failures.get()); Get g = new Get(row); Result result = region.get(g); - assertEquals(result.getValue(fam1, qual1).length, 10000); - assertEquals(result.getValue(fam1, qual2).length, 10000); - assertEquals(result.getValue(fam2, qual3).length, 10000); + assertEquals(10000, result.getValue(fam1, qual1).length); + assertEquals(10000, result.getValue(fam1, qual2).length); + assertEquals(10000, result.getValue(fam2, qual3).length); } /** * Test multi-threaded row mutations. 
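The assertEquals rewrites in TestAtomicOperation above all make the same repair: JUnit's assertEquals takes the expected value first and the actual value second. Reversed arguments pass and fail identically, but a failure message then reads backwards, reporting the constant as the observed value. The related assertNull cleanups elsewhere in the patch state the intent more directly than assertEquals(x, null). A small sketch with made-up values, assuming JUnit 4 on the classpath:

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertNull;

    public class AssertionStyleExample {
      public static void main(String[] args) {
        byte[] value = new byte[10000];

        // Expected first, actual second: on failure JUnit prints
        // "expected:<10000> but was:<...>", the right way round.
        assertEquals(10000, value.length);

        String nextRow = null;
        // Clearer than assertEquals(nextRow, null).
        assertNull(nextRow);
      }
    }
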
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index 06cbf7a51d..a0babe8cee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -50,6 +50,7 @@ public class TestBlocksScanned extends HBaseTestCase { private static HBaseTestingUtility TEST_UTIL = null; + @Override @Before public void setUp() throws Exception { super.setUp(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java index f3c5da60e9..0121aef912 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java @@ -112,6 +112,7 @@ public class TestBulkLoad { argThat(bulkLogWalEdit(WALEdit.BULK_LOAD, tableName.toBytes(), familyName, storeFileNames)), anyBoolean())).thenAnswer(new Answer() { + @Override public Object answer(InvocationOnMock invocation) { WALKeyImpl walKey = invocation.getArgument(1); MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); @@ -137,6 +138,7 @@ public class TestBulkLoad { when(log.append(any(), any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)), anyBoolean())).thenAnswer(new Answer() { + @Override public Object answer(InvocationOnMock invocation) { WALKeyImpl walKey = invocation.getArgument(1); MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); @@ -156,6 +158,7 @@ public class TestBulkLoad { when(log.append(any(), any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)), anyBoolean())).thenAnswer(new Answer() { + @Override public Object answer(InvocationOnMock invocation) { WALKeyImpl walKey = invocation.getArgument(1); MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); @@ -176,6 +179,7 @@ public class TestBulkLoad { when(log.append(any(), any(), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)), anyBoolean())).thenAnswer(new Answer() { + @Override public Object answer(InvocationOnMock invocation) { WALKeyImpl walKey = invocation.getArgument(1); MultiVersionConcurrencyControl mvcc = walKey.getMvcc(); @@ -281,7 +285,7 @@ public class TestBulkLoad { writer.append(new KeyValue(CellUtil.createCell(randomBytes, family, randomBytes, - 0l, + 0L, KeyValue.Type.Put.getCode(), randomBytes))); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java index 0717b4d8ed..734b930adf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java @@ -18,25 +18,25 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import java.lang.management.ManagementFactory; - import java.nio.ByteBuffer; import java.util.Iterator; import java.util.NavigableMap; import java.util.NavigableSet; import java.util.SortedSet; -import junit.framework.TestCase; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import 
org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; - import org.apache.hadoop.hbase.io.util.MemorySizeUtil; - - import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.ByteBufferUtils; @@ -48,11 +48,9 @@ import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import static org.junit.Assert.assertTrue; - @Category({RegionServerTests.class, SmallTests.class}) @RunWith(Parameterized.class) -public class TestCellFlatSet extends TestCase { +public class TestCellFlatSet { @Parameterized.Parameters public static Object[] data() { return new Object[] { "SMALL_CHUNKS", "NORMAL_CHUNKS" }; // test with different chunk sizes @@ -77,25 +75,22 @@ public class TestCellFlatSet extends TestCase { public TestCellFlatSet(String chunkType){ long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage() .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(CONF, false)); - if (chunkType == "NORMAL_CHUNKS") { + if (chunkType.equals("NORMAL_CHUNKS")) { chunkCreator = ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null); - assertTrue(chunkCreator != null); + assertNotNull(chunkCreator); smallChunks = false; } else { // chunkCreator with smaller chunk size, so only 3 cell-representations can accommodate a chunk chunkCreator = ChunkCreator.initialize(SMALL_CHUNK_SIZE, false, globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null); - assertTrue(chunkCreator != null); + assertNotNull(chunkCreator); smallChunks = true; } } @Before - @Override public void setUp() throws Exception { - super.setUp(); - // create array of Cells to bass to the CellFlatMap under CellSet final byte[] one = Bytes.toBytes(15); final byte[] two = Bytes.toBytes(25); @@ -126,7 +121,7 @@ public class TestCellFlatSet extends TestCase { ascCCM = setUpCellChunkMap(true); descCCM = setUpCellChunkMap(false); - if (smallChunks == true) { // check jumbo chunks as well + if (smallChunks) { // check jumbo chunks as well ascCCM = setUpJumboCellChunkMap(true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java index 4fa06b0524..a58a22e701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellSkipListSet.java @@ -36,6 +36,7 @@ public class TestCellSkipListSet extends TestCase { private final CellSet csls = new CellSet(CellComparatorImpl.COMPARATOR); + @Override protected void setUp() throws Exception { super.setUp(); this.csls.clear(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index c0ba621008..87e4affabb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -56,6 +56,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static 
org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; /** @@ -197,7 +198,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), System.currentTimeMillis())); if (i + 1 == ROW_COUNT) { - assertEquals(nr, null); + assertNull(nr); } else { assertTrue(CellComparator.getInstance().compareRows(nr, new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); @@ -317,7 +318,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest(); EnvironmentEdgeManager.injectEdge(edge); long t = memstore.timeOfOldestEdit(); - assertEquals(t, Long.MAX_VALUE); + assertEquals(Long.MAX_VALUE, t); // test the case that the timeOfOldestEdit is updated after a KV add memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"), null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 1a04c8e39a..030658e564 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -454,7 +454,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore count++; } } - assertEquals("the count should be ", count, 150); + assertEquals("the count should be ", 150, count); for(int i = 0; i < scanners.size(); i++) { scanners.get(i).close(); } @@ -481,7 +481,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore } finally { itr.close(); } - assertEquals("the count should be ", cnt, 150); + assertEquals("the count should be ", 150, cnt); } private void addRowsByKeysWith50Cols(AbstractMemStore hmc, String[] keys) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index afe322810f..f017617f86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -116,7 +116,8 @@ public class TestCompaction { // Increment the least significant character so we get to next row. 
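The rewrite just below addresses a narrowing compound assignment: for a byte b, the expression b += 2 compiles to b = (byte) (b + 2), so the narrowing cast, and any overflow it wraps, is invisible at the call site. Spelling the cast out, as the hunk does, makes the truncation explicit. A minimal demonstration of the behaviour (plain demo code, not from the patch):

    public class NarrowingExample {
      public static void main(String[] args) {
        byte b = 127;
        b += 2;                // really b = (byte) (b + 2): wraps silently
        System.out.println(b); // -127

        byte c = 127;
        c = (byte) (c + 2);    // same result, but the cast is visible
        System.out.println(c); // -127
      }
    }
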
secondRowBytes[START_KEY_BYTES.length - 1]++; thirdRowBytes = START_KEY_BYTES.clone(); - thirdRowBytes[START_KEY_BYTES.length - 1] += 2; + thirdRowBytes[START_KEY_BYTES.length - 1] = + (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2); } @Before @@ -264,7 +265,7 @@ public class TestCompaction { FileSystem fs = store.getFileSystem(); // default compaction policy created one and only one new compacted file Path dstPath = store.getRegionFileSystem().createTempName(); - FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null); + FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, 1024L, null); stream.writeChars("CORRUPT FILE!!!!"); stream.close(); Path origPath = store.getRegionFileSystem().commitStoreFile( @@ -390,7 +391,7 @@ public class TestCompaction { class StoreMockMaker extends StatefulStoreMockMaker { public ArrayList compacting = new ArrayList<>(); public ArrayList notCompacting = new ArrayList<>(); - private ArrayList results; + private final ArrayList results; public StoreMockMaker(ArrayList results) { this.results = results; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java index d2e1866841..e9f381e6b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java @@ -125,6 +125,7 @@ public class TestCompactionArchiveConcurrentClose { // now run the cleaner with a concurrent close Thread cleanerThread = new Thread() { + @Override public void run() { cleaner.chore(); } @@ -138,6 +139,7 @@ public class TestCompactionArchiveConcurrentClose { } final AtomicReference closeException = new AtomicReference<>(); Thread closeThread = new Thread() { + @Override public void run() { // wait for the chore to complete and call close try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java index 112fe4de30..0dafd8053c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java @@ -141,7 +141,7 @@ public class TestCompactionFileNotFound { } }); // Split at this point should not result in the RS being aborted - assertEquals(util.getMiniHBaseCluster().getLiveRegionServerThreads().size(), 3); + assertEquals(3, util.getMiniHBaseCluster().getLiveRegionServerThreads().size()); } finally { if (admin != null) { admin.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index b89fb0e194..53e04e049a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -575,7 +575,7 @@ public class TestDefaultMemStore { Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), System.currentTimeMillis())); if (i + 1 == ROW_COUNT) { - assertEquals(nr, null); + assertNull(nr); } else { 
assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr, new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); @@ -863,7 +863,7 @@ public class TestDefaultMemStore { EnvironmentEdgeManager.injectEdge(edge); DefaultMemStore memstore = new DefaultMemStore(); long t = memstore.timeOfOldestEdit(); - assertEquals(t, Long.MAX_VALUE); + assertEquals(Long.MAX_VALUE, t); // test the case that the timeOfOldestEdit is updated after a KV add memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"), null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 642b47108f..aa38b7aaca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -168,7 +169,7 @@ public class TestGetClosestAtOrBefore { byte [] metaKey = HRegionInfo.createRegionName( tableb, tofindBytes, HConstants.NINES, false); - LOG.info("find=" + new String(metaKey)); + LOG.info("find=" + new String(metaKey, StandardCharsets.UTF_8)); Result r = UTIL.getClosestRowBefore(mr, metaKey, HConstants.CATALOG_FAMILY); if (answer == -1) { assertNull(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 3c11b3164c..ee11075d78 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -43,6 +43,7 @@ import static org.mockito.Mockito.when; import java.io.IOException; import java.io.InterruptedIOException; import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -669,7 +670,7 @@ public class TestHRegion { MonitoredTask status = TaskMonitor.get().createStatus(method); Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (HStore store : region.getStores()) { - maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1); + maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId - 1); } long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); assertEquals(maxSeqId, seqId); @@ -721,7 +722,7 @@ public class TestHRegion { MonitoredTask status = TaskMonitor.get().createStatus(method); Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (HStore store : region.getStores()) { - maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1); + maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), recoverSeqId - 1); } long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); assertEquals(maxSeqId, seqId); @@ -766,7 +767,7 @@ public class TestHRegion { Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (HStore store : region.getStores()) { - maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId); + maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId); } long seqId = 
region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, null); assertEquals(minSeqId, seqId); @@ -824,7 +825,7 @@ public class TestHRegion { Map maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR); MonitoredTask status = TaskMonitor.get().createStatus(method); for (HStore store : region.getStores()) { - maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1); + maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), recoverSeqId - 1); } long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); assertEquals(maxSeqId, seqId); @@ -1070,7 +1071,7 @@ public class TestHRegion { } } - class IsFlushWALMarker implements ArgumentMatcher { + static class IsFlushWALMarker implements ArgumentMatcher { volatile FlushAction[] actions; public IsFlushWALMarker(FlushAction... actions) { this.actions = actions; @@ -2192,7 +2193,7 @@ public class TestHRegion { deleteMap.put(family, kvs); region.delete(deleteMap, Durability.SYNC_WAL); } catch (Exception e) { - assertTrue("Family " + new String(family) + " does not exist", false); + fail("Family " + new String(family, StandardCharsets.UTF_8) + " does not exist"); } // testing non existing family @@ -2205,7 +2206,8 @@ public class TestHRegion { } catch (Exception e) { ok = true; } - assertEquals("Family " + new String(family) + " does exist", true, ok); + assertEquals("Family " + new String(family, StandardCharsets.UTF_8) + " does exist", + true, ok); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region = null; @@ -3467,18 +3469,18 @@ public class TestHRegion { List results = new ArrayList<>(); assertTrue(s.next(results)); - assertEquals(results.size(), 1); + assertEquals(1, results.size()); results.clear(); assertTrue(s.next(results)); - assertEquals(results.size(), 3); + assertEquals(3, results.size()); assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha)); assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential)); assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined)); results.clear(); assertFalse(s.next(results)); - assertEquals(results.size(), 0); + assertEquals(0, results.size()); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); this.region = null; @@ -3564,16 +3566,19 @@ public class TestHRegion { while (true) { boolean more = s.next(results, scannerContext); if ((index >> 1) < 5) { - if (index % 2 == 0) - assertEquals(results.size(), 3); - else - assertEquals(results.size(), 1); - } else - assertEquals(results.size(), 1); + if (index % 2 == 0) { + assertEquals(3, results.size()); + } else { + assertEquals(1, results.size()); + } + } else { + assertEquals(1, results.size()); + } results.clear(); index++; - if (!more) + if (!more) { break; + } } } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -4448,7 +4453,7 @@ public class TestHRegion { // after all increment finished, the row will increment to 20*100 = 2000 int threadNum = 20; int incCounter = 100; - long expected = threadNum * incCounter; + long expected = (long) threadNum * incCounter; Thread[] incrementers = new Thread[threadNum]; Thread flushThread = new Thread(flusher); for (int i = 0; i < threadNum; i++) { @@ -4470,7 +4475,7 @@ public class TestHRegion { List kvs = res.getColumnCells(Incrementer.family, Incrementer.qualifier); // we just got the latest version - assertEquals(kvs.size(), 1); + assertEquals(1, kvs.size()); Cell kv = kvs.get(0); assertEquals(expected, Bytes.toLong(kv.getValueArray(), 
kv.getValueOffset())); this.region = null; @@ -4561,7 +4566,7 @@ public class TestHRegion { List kvs = res.getColumnCells(Appender.family, Appender.qualifier); // we just got the latest version - assertEquals(kvs.size(), 1); + assertEquals(1, kvs.size()); Cell kv = kvs.get(0); byte[] appendResult = new byte[kv.getValueLength()]; System.arraycopy(kv.getValueArray(), kv.getValueOffset(), appendResult, 0, kv.getValueLength()); @@ -6150,7 +6155,7 @@ public class TestHRegion { r = region.get(new Get(row)); byte[] val = r.getValue(fam1, q1); assertNotNull(val); - assertEquals(Bytes.toLong(val), 1L); + assertEquals(1L, Bytes.toLong(val)); // Increment with a TTL of 5 seconds Increment incr = new Increment(row).addColumn(fam1, q1, 1L); @@ -6161,7 +6166,7 @@ public class TestHRegion { r = region.get(new Get(row)); val = r.getValue(fam1, q1); assertNotNull(val); - assertEquals(Bytes.toLong(val), 2L); + assertEquals(2L, Bytes.toLong(val)); // Increment time to T+25 seconds edge.incrementTime(5000); @@ -6170,7 +6175,7 @@ public class TestHRegion { r = region.get(new Get(row)); val = r.getValue(fam1, q1); assertNotNull(val); - assertEquals(Bytes.toLong(val), 1L); + assertEquals(1L, Bytes.toLong(val)); // Increment time to T+30 seconds edge.incrementTime(5000); @@ -6199,14 +6204,14 @@ public class TestHRegion { Result result = region.get(new Get(row)); Cell c = result.getColumnLatestCell(fam1, qual1); assertNotNull(c); - assertEquals(c.getTimestamp(), 10L); + assertEquals(10L, c.getTimestamp()); edge.setValue(1); // clock goes back region.increment(inc); result = region.get(new Get(row)); c = result.getColumnLatestCell(fam1, qual1); - assertEquals(c.getTimestamp(), 11L); - assertEquals(Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()), 2L); + assertEquals(11L, c.getTimestamp()); + assertEquals(2L, Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength())); } @Test @@ -6224,13 +6229,13 @@ public class TestHRegion { Result result = region.get(new Get(row)); Cell c = result.getColumnLatestCell(fam1, qual1); assertNotNull(c); - assertEquals(c.getTimestamp(), 10L); + assertEquals(10L, c.getTimestamp()); edge.setValue(1); // clock goes back region.append(a); result = region.get(new Get(row)); c = result.getColumnLatestCell(fam1, qual1); - assertEquals(c.getTimestamp(), 11L); + assertEquals(11L, c.getTimestamp()); byte[] expected = new byte[qual1.length*2]; System.arraycopy(qual1, 0, expected, 0, qual1.length); @@ -6255,7 +6260,7 @@ public class TestHRegion { Result result = region.get(new Get(row)); Cell c = result.getColumnLatestCell(fam1, qual1); assertNotNull(c); - assertEquals(c.getTimestamp(), 10L); + assertEquals(10L, c.getTimestamp()); edge.setValue(1); // clock goes back p = new Put(row); @@ -6264,7 +6269,7 @@ public class TestHRegion { region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(qual1), p, false); result = region.get(new Get(row)); c = result.getColumnLatestCell(fam1, qual1); - assertEquals(c.getTimestamp(), 10L); + assertEquals(10L, c.getTimestamp()); assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), qual2, 0, qual2.length)); @@ -6304,9 +6309,9 @@ public class TestHRegion { }; OperationStatus[] status = region.batchMutate(mutations); - assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS); - assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SANITY_CHECK_FAILURE); - assertEquals(status[2].getOperationStatusCode(), OperationStatusCode.SUCCESS); + 
assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode()); + assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, status[1].getOperationStatusCode()); + assertEquals(OperationStatusCode.SUCCESS, status[2].getOperationStatusCode()); // test with a row lock held for a long time @@ -6347,8 +6352,8 @@ public class TestHRegion { // this will wait for the row lock, and it will eventually succeed OperationStatus[] status = region.batchMutate(mutations); - assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS); - assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SUCCESS); + assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode()); + assertEquals(OperationStatusCode.SUCCESS, status[1].getOperationStatusCode()); return null; } }); @@ -6374,7 +6379,7 @@ public class TestHRegion { Result result = region.get(new Get(row)); Cell c = result.getColumnLatestCell(fam1, qual1); assertNotNull(c); - assertEquals(c.getTimestamp(), 10L); + assertEquals(10L, c.getTimestamp()); edge.setValue(1); // clock goes back p = new Put(row); @@ -6386,7 +6391,7 @@ public class TestHRegion { new BinaryComparator(qual1), rm, false)); result = region.get(new Get(row)); c = result.getColumnLatestCell(fam1, qual1); - assertEquals(c.getTimestamp(), 10L); + assertEquals(10L, c.getTimestamp()); LOG.info("c value " + Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index 3c25f6b96b..dcd7ff7b9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -232,7 +232,7 @@ public class TestHRegionReplayEvents { // flush region FlushResultImpl flush = (FlushResultImpl)secondaryRegion.flush(true); - assertEquals(flush.result, FlushResultImpl.Result.CANNOT_FLUSH); + assertEquals(FlushResultImpl.Result.CANNOT_FLUSH, flush.result); verifyData(secondaryRegion, 0, 1000, cq, families); @@ -1207,13 +1207,13 @@ public class TestHRegionReplayEvents { // primary region is empty at this point. 
Request a flush with writeFlushRequestWalMarker=false FlushResultImpl result = primaryRegion.flushcache(true, false, FlushLifeCycleTracker.DUMMY); assertNotNull(result); - assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY); + assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result); assertFalse(result.wroteFlushWalMarker); // request flush again, but this time with writeFlushRequestWalMarker = true result = primaryRegion.flushcache(true, true, FlushLifeCycleTracker.DUMMY); assertNotNull(result); - assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY); + assertEquals(FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, result.result); assertTrue(result.wroteFlushWalMarker); List flushes = Lists.newArrayList(); @@ -1644,7 +1644,7 @@ public class TestHRegionReplayEvents { hFileFactory.withFileContext(new HFileContext()); HFile.Writer writer = hFileFactory.create(); try { - writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0l, + writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L, KeyValue.Type.Put.getCode(), valueBytes))); } finally { writer.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index b6b39bcbe3..034d9d7391 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -186,6 +186,7 @@ public class TestHRegionServerBulkLoad { this.tableName = tableName; } + @Override public void doAnAction() throws Exception { long iteration = numBulkLoads.getAndIncrement(); Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", @@ -295,6 +296,7 @@ public class TestHRegionServerBulkLoad { table = UTIL.getConnection().getTable(TABLE_NAME); } + @Override public void doAnAction() throws Exception { Scan s = new Scan(); for (byte[] family : targetFamilies) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index b8d3ec7a59..288333b343 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -756,7 +756,7 @@ public class TestHStore { } @Override - public void write(byte[] buf, int offset, int length) throws IOException { + public synchronized void write(byte[] buf, int offset, int length) throws IOException { System.err.println("faulty stream write at pos " + getPos()); injectFault(); super.write(buf, offset, length); @@ -1551,7 +1551,7 @@ public class TestHStore { ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(5).build(), hook); } - private class MyStore extends HStore { + private static class MyStore extends HStore { private final MyStoreHook hook; MyStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration @@ -1576,7 +1576,7 @@ public class TestHStore { } } - private abstract class MyStoreHook { + private abstract static class MyStoreHook { void getScanners(MyStore store) throws IOException { } @@ -1595,7 +1595,7 @@ public class TestHStore { MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {}); MemStoreSizing memStoreSizing = new 
MemStoreSizing(); long ts = System.currentTimeMillis(); - long seqID = 1l; + long seqID = 1L; // Add some data to the region and do some flushes for (int i = 1; i < 10; i++) { store.add(createCell(Bytes.toBytes("row" + i), qf1, ts, seqID++, Bytes.toBytes("")), @@ -1663,6 +1663,7 @@ public class TestHStore { return this.heap; } + @Override public void run() { scanner.trySwitchToStreamRead(); heap = scanner.heap; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index 31f16ea03d..1a38d7f710 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -90,11 +90,13 @@ public class TestHStoreFile extends HBaseTestCase { private static final int CKBYTES = 512; private static String TEST_FAMILY = "cf"; + @Override @Before public void setUp() throws Exception { super.setUp(); } + @Override @After public void tearDown() throws Exception { super.tearDown(); @@ -505,8 +507,8 @@ public class TestHStoreFile extends HBaseTestCase { long now = System.currentTimeMillis(); for (int i = 0; i < 2000; i += 2) { String row = String.format(localFormatter, i); - KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(), - "col".getBytes(), now, "value".getBytes()); + KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), + Bytes.toBytes("col"), now, Bytes.toBytes("value")); writer.append(kv); } writer.close(); @@ -523,12 +525,13 @@ public class TestHStoreFile extends HBaseTestCase { for (int i = 0; i < 2000; i++) { String row = String.format(localFormatter, i); TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); - columns.add("family:col".getBytes()); + columns.add(Bytes.toBytes("family:col")); - Scan scan = new Scan(row.getBytes(),row.getBytes()); - scan.addColumn("family".getBytes(), "family:col".getBytes()); + Scan scan = new Scan(Bytes.toBytes(row),Bytes.toBytes(row)); + scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes("family:col")); HStore store = mock(HStore.class); - when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family")); + when(store.getColumnFamilyDescriptor()) + .thenReturn(ColumnFamilyDescriptorBuilder.of("family")); boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE); if (i % 2 == 0) { if (!exists) falseNeg++; @@ -592,8 +595,8 @@ public class TestHStoreFile extends HBaseTestCase { long now = System.currentTimeMillis(); for (int i = 0; i < 2000; i += 2) { String row = String.format(localFormatter, i); - KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(), - "col".getBytes(), now, KeyValue.Type.DeleteFamily, "value".getBytes()); + KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), + Bytes.toBytes("col"), now, KeyValue.Type.DeleteFamily, Bytes.toBytes("value")); writer.append(kv); } writer.close(); @@ -696,9 +699,8 @@ public class TestHStoreFile extends HBaseTestCase { String row = String.format(localFormatter, i); String col = String.format(localFormatter, j); for (int k= 0; k < versions; ++k) { // versions - KeyValue kv = new KeyValue(row.getBytes(), - "family".getBytes(), ("col" + col).getBytes(), - now-k, Bytes.toBytes((long)-1)); + KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), + Bytes.toBytes("col" + col), now-k, Bytes.toBytes(-1L)); writer.append(kv); } } @@ -713,7 +715,8 @@ public class 
TestHStoreFile extends HBaseTestCase { assertEquals(expKeys[x], reader.generalBloomFilter.getKeyCount()); HStore store = mock(HStore.class); - when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family")); + when(store.getColumnFamilyDescriptor()) + .thenReturn(ColumnFamilyDescriptorBuilder.of("family")); // check false positives rate int falsePos = 0; int falseNeg = 0; @@ -722,10 +725,10 @@ public class TestHStoreFile extends HBaseTestCase { String row = String.format(localFormatter, i); String col = String.format(localFormatter, j); TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); - columns.add(("col" + col).getBytes()); + columns.add(Bytes.toBytes("col" + col)); - Scan scan = new Scan(row.getBytes(),row.getBytes()); - scan.addColumn("family".getBytes(), ("col"+col).getBytes()); + Scan scan = new Scan(Bytes.toBytes(row),Bytes.toBytes(row)); + scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes(("col"+col))); boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java index 24ab7a19c4..d9956de8c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java @@ -88,6 +88,7 @@ public class TestKeyValueHeap extends HBaseTestCase { return actual; } + @Override @Before public void setUp() throws Exception { super.setUp(); @@ -185,7 +186,7 @@ public class TestKeyValueHeap extends HBaseTestCase { for (KeyValueScanner scanner : scanners) { // Verify that close is called and only called once for each scanner assertTrue(((SeekTestScanner) scanner).isClosed()); - assertEquals(((SeekTestScanner) scanner).getClosedNum(), 1); + assertEquals(1, ((SeekTestScanner) scanner).getClosedNum()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 2684bdf90c..0840a63167 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -108,7 +108,8 @@ public class TestMajorCompaction { // Increment the least significant character so we get to next row. secondRowBytes[START_KEY_BYTES.length - 1]++; thirdRowBytes = START_KEY_BYTES.clone(); - thirdRowBytes[START_KEY_BYTES.length - 1] += 2; + thirdRowBytes[START_KEY_BYTES.length - 1] = + (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2); } @Before @@ -277,7 +278,7 @@ public class TestMajorCompaction { // Force major compaction. 
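The TestHStoreFile hunks above replace String.getBytes() with Bytes.toBytes(...) for the same reason as the new String(bytes, StandardCharsets.UTF_8) fixes earlier in the patch: the no-argument String/byte[] conversions use the JVM's platform default charset, so the encoded bytes can vary from machine to machine. HBase's Bytes utility always encodes UTF-8. A short sketch, assuming only that org.apache.hadoop.hbase.util.Bytes is on the classpath:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    import org.apache.hadoop.hbase.util.Bytes;

    public class CharsetExample {
      public static void main(String[] args) {
        String row = "row-0001";

        byte[] platformDefault = row.getBytes();                    // charset depends on JVM/locale
        byte[] viaBytesUtil = Bytes.toBytes(row);                   // always UTF-8
        byte[] explicitUtf8 = row.getBytes(StandardCharsets.UTF_8); // plain-JDK equivalent

        System.out.println(Arrays.equals(viaBytesUtil, explicitUtf8)); // true
        // platformDefault matches the others only when the default charset is UTF-8.
        System.out.println(Arrays.equals(platformDefault, explicitUtf8));
      }
    }
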
r.compact(true); - assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1); + assertEquals(1, r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size()); result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100)); assertTrue("Second row should still be deleted", result.isEmpty()); @@ -398,8 +399,8 @@ public class TestMajorCompaction { private void createSmallerStoreFile(final HRegion region) throws IOException { Table loader = new RegionAsTable(region); - HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" + - "bbb").getBytes(), null); + HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), Bytes.toBytes("" + + "bbb"), null); region.flush(true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java index 8d74d8def7..795fa2f8c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreLAB.java @@ -62,8 +62,8 @@ public class TestMemStoreLAB { @BeforeClass public static void setUpBeforeClass() throws Exception { - ChunkCreator.initialize(1 * 1024, false, 50*1024000l, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, - null); + ChunkCreator.initialize(1 * 1024, false, 50 * 1024000L, 0.2f, + MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java index 98b0761a62..4ae92a4c24 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java @@ -76,7 +76,8 @@ public class TestMinorCompaction { // Increment the least significant character so we get to next row. 
secondRowBytes[START_KEY_BYTES.length - 1]++; thirdRowBytes = START_KEY_BYTES.clone(); - thirdRowBytes[START_KEY_BYTES.length - 1] += 2; + thirdRowBytes[START_KEY_BYTES.length - 1] = + (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2); col1 = Bytes.toBytes("column1"); col2 = Bytes.toBytes("column2"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java index 084c4c9163..a09c8cd0b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiVersionConcurrencyControl.java @@ -46,6 +46,7 @@ public class TestMultiVersionConcurrencyControl { private Random rnd = new Random(); public boolean failed = false; + @Override public void run() { while (!finished.get()) { MultiVersionConcurrencyControl.WriteEntry e = @@ -82,6 +83,7 @@ public class TestMultiVersionConcurrencyControl { final AtomicBoolean readerFailed = new AtomicBoolean(false); final AtomicLong failedAt = new AtomicLong(); Runnable reader = new Runnable() { + @Override public void run() { long prev = mvcc.getReadPoint(); while (!finished.get()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java index a8b0f226e1..82b0251607 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java @@ -24,30 +24,31 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ipc.PriorityFunction; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Get; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.junit.Before; -import org.junit.Test; -import 
org.junit.experimental.categories.Category; -import org.mockito.Mockito; - -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; /** * Tests that verify certain RPCs get a higher QoS. @@ -85,7 +86,7 @@ public class TestPriorityRpc { RegionSpecifier regionSpecifier = regionSpecifierBuilder.build(); getRequestBuilder.setRegion(regionSpecifier); Get.Builder getBuilder = Get.newBuilder(); - getBuilder.setRow(UnsafeByteOperations.unsafeWrap("somerow".getBytes())); + getBuilder.setRow(UnsafeByteOperations.unsafeWrap(Bytes.toBytes("somerow"))); getRequestBuilder.setGet(getBuilder.build()); GetRequest getRequest = getRequestBuilder.build(); RequestHeader header = headerBuilder.build(); @@ -96,7 +97,8 @@ public class TestPriorityRpc { RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class); Mockito.when(mockRpc.getRegion(Mockito.any())).thenReturn(mockRegion); Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo); - Mockito.when(mockRegionInfo.getTable()).thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()); + Mockito.when(mockRegionInfo.getTable()) + .thenReturn(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()); // Presume type. ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS); assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, getRequest, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index a497bf4a12..59a0c310f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -71,7 +71,7 @@ public class TestRegionReplicas { private static final int NB_SERVERS = 1; private static Table table; - private static final byte[] row = "TestRegionReplicas".getBytes(); + private static final byte[] row = Bytes.toBytes("TestRegionReplicas"); private static HRegionInfo hriPrimary; private static HRegionInfo hriSecondary; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java index d1bf773655..9a02a9dcd1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithModifyTable.java @@ -111,7 +111,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be more than 1", totalRegions, 3); + assertEquals("the number of regions should be more than 1", 3, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -132,7 +132,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be equal to 30", totalRegions, 30); + assertEquals("the number of regions should be equal to 
30", 30, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -148,7 +148,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be 3", totalRegions, 3); + assertEquals("the number of regions should be 3", 3, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -164,7 +164,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be reduced to 2", totalRegions, 2); + assertEquals("the number of regions should be reduced to 2", 2, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -181,7 +181,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be reduced to 40", totalRegions, 40); + assertEquals("the number of regions should be reduced to 40", 40, totalRegions); } finally { disableAndDeleteTable(tableName); } @@ -198,7 +198,7 @@ public class TestRegionReplicasWithModifyTable { List onlineRegions2 = getSecondaryRS().getRegions(tableName); List onlineRegions3 = getTertiaryRS().getRegions(tableName); int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size(); - assertEquals("the number of regions should be equal to 45", totalRegions, 3 * 15); + assertEquals("the number of regions should be equal to 45", 3 * 15, totalRegions); } finally { disableAndDeleteTable(tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java index 0122674ce1..6172d612cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java @@ -37,7 +37,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -50,7 +50,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -60,12 +60,12 @@ public class 
TestRegionServerAccounting { public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -76,12 +76,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l), (long) (2l * 1024l * 1024l * 1024l)); + new MemStoreSize(3L * 1024 * 1024, 2L * 1024 * 1024 * 1024); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -91,12 +91,12 @@ public class TestRegionServerAccounting { public void testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize(3L * 1024 * 1024 * 1024, 1L * 1024 * 1024 * 1024); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -107,12 +107,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l), (long) (2l * 1024l * 1024l * 1024l)); + new MemStoreSize(3L * 1024 * 1024, 2L * 1024 * 1024 * 1024); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index ea27ee5586..b63b84492e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -32,13 +32,10 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -51,6 +48,11 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; + /** * Tests on the region server, without the master. @@ -61,7 +63,7 @@ public class TestRegionServerNoMaster { private static final Logger LOG = LoggerFactory.getLogger(TestRegionServerNoMaster.class); private static final int NB_SERVERS = 1; private static Table table; - private static final byte[] row = "ee".getBytes(); + private static final byte[] row = Bytes.toBytes("ee"); private static HRegionInfo hri; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index c6dce67edc..afbb48d6a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -117,7 +117,8 @@ public class TestScanner { // Increment the least significant character so we get to next row. 
secondRowBytes[START_KEY_BYTES.length - 1]++; thirdRowBytes = START_KEY_BYTES.clone(); - thirdRowBytes[START_KEY_BYTES.length - 1] += 2; + thirdRowBytes[START_KEY_BYTES.length - 1] = + (byte) (thirdRowBytes[START_KEY_BYTES.length - 1] + 2); col1 = Bytes.toBytes("column1"); } @@ -589,6 +590,7 @@ public class TestScanner { if (flushIndex == count) { LOG.info("Starting flush at flush index " + flushIndex); Thread t = new Thread() { + @Override public void run() { try { region.flush(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java index 0c014fd5e7..63ea993f0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java @@ -233,6 +233,7 @@ public class TestScannerWithBulkload { // Create a scanner and then do bulk load final CountDownLatch latch = new CountDownLatch(1); new Thread() { + @Override public void run() { try { Put put1 = new Put(Bytes.toBytes("row5")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 613282f951..8519c3c6a5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -76,7 +76,7 @@ public class TestSplitLogWorker { private SplitLogWorker slw; private ExecutorService executorService; - class DummyServer implements Server { + static class DummyServer implements Server { private ZKWatcher zkw; private Configuration conf; private CoordinatedStateManager cm; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index ab2d4b4574..c80fc2ed3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -673,7 +673,7 @@ public class TestSplitTransactionOnCluster { FileSystem fs = TESTING_UTIL.getDFSCluster().getFileSystem(); Map storefiles = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); - assertEquals("Expected nothing but found " + storefiles.toString(), storefiles.size(), 0); + assertEquals("Expected nothing but found " + storefiles.toString(), 0, storefiles.size()); // find a splittable region. 
Refresh the regions list regions = cluster.getRegions(tableName); @@ -696,8 +696,8 @@ public class TestSplitTransactionOnCluster { HBaseFsck.debugLsr(conf, new Path("/")); Map storefilesAfter = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName); - assertEquals("Expected nothing but found " + storefilesAfter.toString(), - storefilesAfter.size(), 0); + assertEquals("Expected nothing but found " + storefilesAfter.toString(), 0, + storefilesAfter.size()); hri = region.getRegionInfo(); // split parent AssignmentManager am = cluster.getMaster().getAssignmentManager(); @@ -755,7 +755,7 @@ public class TestSplitTransactionOnCluster { region.flush(true); HStore store = region.getStore(Bytes.toBytes("f")); Collection storefiles = store.getStorefiles(); - assertEquals(storefiles.size(), 1); + assertEquals(1, storefiles.size()); assertFalse(region.hasReferences()); Path referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 1f5db50b35..2c679d8d43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -164,6 +164,7 @@ public class TestStoreScanner { new KeyValueScanner[] { new KeyValueScanFixture(CellComparator.getInstance(), CELL_GRID) })); } + @Override protected void resetKVHeap(List scanners, CellComparator comparator) throws IOException { if (count == null) { @@ -172,6 +173,7 @@ public class TestStoreScanner { heap = new KeyValueHeapWithCount(scanners, comparator, count); } + @Override protected boolean trySkipToNextRow(Cell cell) throws IOException { boolean optimized = super.trySkipToNextRow(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) @@ -182,6 +184,7 @@ public class TestStoreScanner { return optimized; } + @Override protected boolean trySkipToNextColumn(Cell cell) throws IOException { boolean optimized = super.trySkipToNextColumn(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) @@ -227,6 +230,7 @@ public class TestStoreScanner { new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) })); } + @Override protected boolean trySkipToNextColumn(Cell cell) throws IOException { boolean optimized = super.trySkipToNextColumn(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) @@ -255,6 +259,7 @@ public class TestStoreScanner { new KeyValueScanFixture(CellComparator.getInstance(), CELL_WITH_VERSIONS) })); } + @Override protected boolean trySkipToNextColumn(Cell cell) throws IOException { boolean optimized = super.trySkipToNextColumn(cell); LOG.info("Cell=" + cell + ", nextIndex=" + CellUtil.toString(getNextIndexedKey(), false) @@ -884,6 +889,7 @@ public class TestStoreScanner { try { final long now = System.currentTimeMillis(); EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() { + @Override public long currentTime() { return now; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java index 52c31d9a27..273535b399 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSyncTimeRangeTracker.java @@ -32,10 +32,12 @@ public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { private static final int NUM_KEYS = 10000000; private static final int NUM_OF_THREADS = 20; + @Override protected TimeRangeTracker getTimeRangeTracker() { return TimeRangeTracker.create(TimeRangeTracker.Type.SYNC); } + @Override protected TimeRangeTracker getTimeRangeTracker(long min, long max) { return TimeRangeTracker.create(TimeRangeTracker.Type.SYNC, min, max); } @@ -77,7 +79,7 @@ public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { assertTrue(trr.getMin() == 0); } - class RandomTestData { + static class RandomTestData { private long[] keys = new long[NUM_KEYS]; private long min = Long.MAX_VALUE; private long max = 0; @@ -107,7 +109,7 @@ public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { } } - class TrtUpdateRunnable implements Runnable { + static class TrtUpdateRunnable implements Runnable { private TimeRangeTracker trt; private RandomTestData data; @@ -116,6 +118,7 @@ public class TestSyncTimeRangeTracker extends TestSimpleTimeRangeTracker { this.data = data; } + @Override public void run() { for (long key : data.keys) { trt.includeTimestamp(key); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 62d22d201b..70bdc4956e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -258,6 +258,7 @@ public class TestWALLockup { // in HBASE-14317. Flush hangs trying to get sequenceid because the ringbuffer is held up // by the zigzaglatch waiting on syncs to come home. Thread t = new Thread ("Flusher") { + @Override public void run() { try { if (region.getMemStoreSize() <= 0) { @@ -444,6 +445,7 @@ public class TestWALLockup { dodgyWAL2.append(region.getRegionInfo(), key, edit, true); Thread t = new Thread("Sync") { + @Override public void run() { try { dodgyWAL2.sync(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index f3bd7eeb86..290f71a4ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -581,7 +581,7 @@ public class TestWalAndCompactingMemStoreFlush { // The total memstores size should be empty assertEquals(0, totalMemstoreSizePhaseV); // Because there is nothing in any memstore the WAL's LSN should be -1 - assertEquals(smallestSeqInRegionCurrentMemstorePhaseV, HConstants.NO_SEQNUM); + assertEquals(HConstants.NO_SEQNUM, smallestSeqInRegionCurrentMemstorePhaseV); // What happens when we hit the memstore limit, but we are not able to find // any Column Family above the threshold? 
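
[Editor's note, not part of the patch: the test hunks above and below apply the same small set of mechanical findbugs/error-prone fixes over and over. The sketch below collects the four most frequent patterns in one self-contained JUnit test so they can be read in isolation; the class and method names are hypothetical and chosen only for illustration.]

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hbase.util.Bytes;
    import org.junit.Test;

    public class WarningPatternsExample {
      @Test
      public void demonstratesFixedPatterns() {
        // 1. Long literals use the 'L' suffix: a lowercase 'l' is easily
        //    misread as the digit 1 (e.g. "50*1024000l" above).
        long seqId = 1L;

        // 2. String.getBytes() encodes with the platform default charset, so a
        //    test can pass on one machine and fail on another; Bytes.toBytes()
        //    always produces UTF-8.
        byte[] row = Bytes.toBytes("row" + seqId);

        // 3. JUnit's assertEquals takes (expected, actual); with the arguments
        //    swapped, a failure message reports the wrong value as "expected".
        assertEquals(4, row.length);

        // 4. Arithmetic on a byte promotes to int, so the narrowing cast is
        //    written out explicitly instead of being hidden inside a compound
        //    assignment such as row[0] += 2.
        row[0] = (byte) (row[0] + 2);
      }
    }

[These four patterns, together with adding @Override to anonymous-class methods and making helper inner classes static, account for the bulk of the churn in the surrounding test hunks.]
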
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java index 7cdd24d498..13c7a6bc10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -110,7 +111,8 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator { final Class fileGenClass, final int inMmax, final int inMin, - final float inRatio) throws IllegalAccessException, InstantiationException { + final float inRatio) throws IllegalAccessException, InstantiationException, + NoSuchMethodException, InvocationTargetException { super(PerfTestCompactionPolicies.class); this.fileGenClass = fileGenClass; this.max = inMmax; @@ -138,7 +140,7 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator { new Class[] {Configuration.class, StoreConfigInformation.class }, new Object[] {configuration, store }); - this.generator = fileGenClass.newInstance(); + this.generator = fileGenClass.getDeclaredConstructor().newInstance(); // Used for making paths } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java index 932664b406..f8df870065 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java @@ -92,11 +92,13 @@ public class TestCompactor { writers.add(realWriter); StoreFileWriter writer = mock(StoreFileWriter.class); doAnswer(new Answer() { + @Override public Object answer(InvocationOnMock invocation) { return realWriter.kvs.add((KeyValue) invocation.getArgument(0)); } }).when(writer).append(any()); doAnswer(new Answer() { + @Override public Object answer(InvocationOnMock invocation) { Object[] args = invocation.getArguments(); return realWriter.data.put((byte[]) args[0], (byte[]) args[1]); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java index 3ae49c0adf..b43a89d65c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java @@ -242,7 +242,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { } } - private class AlwaysIncludeAndSeekNextRowFilter extends FilterBase { + private static class AlwaysIncludeAndSeekNextRowFilter extends FilterBase { @Override public ReturnCode filterKeyValue(final Cell c) throws IOException { @@ -287,7 +287,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher { } } - private class AlwaysIncludeFilter extends FilterBase { + private static class AlwaysIncludeFilter extends FilterBase { 
@Override public ReturnCode filterKeyValue(final Cell c) throws IOException { return ReturnCode.INCLUDE; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 093a512a74..009cca0a7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -289,7 +289,7 @@ public abstract class AbstractTestFSWAL { addEdits(wal, hri2, t2, 2, mvcc, scopes2); // get the regions to flush, it should still read region1. regionsToFlush = wal.findRegionsToForceFlush(); - assertEquals(regionsToFlush.length, 1); + assertEquals(1, regionsToFlush.length); assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]); // flush region 1, and roll the wal file. Only last wal which has entries for region1 should // remain. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index ededcf3cfa..6b55adcb69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -705,9 +705,8 @@ public abstract class AbstractTestWALReplay { try { region.flush(true); fail("Injected exception hasn't been thrown"); - } catch (Throwable t) { - LOG.info("Expected simulated exception when flushing region," - + t.getMessage()); + } catch (IOException e) { + LOG.info("Expected simulated exception when flushing region, {}", e.getMessage()); // simulated to abort server Mockito.doReturn(true).when(rsServices).isAborted(); region.setClosing(false); // region normally does not accept writes after @@ -928,8 +927,7 @@ public abstract class AbstractTestWALReplay { * testcase for https://issues.apache.org/jira/browse/HBASE-15252 */ @Test - public void testDatalossWhenInputError() throws IOException, InstantiationException, - IllegalAccessException { + public void testDatalossWhenInputError() throws Exception { final TableName tableName = TableName.valueOf("testDatalossWhenInputError"); final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName); @@ -964,7 +962,7 @@ public abstract class AbstractTestWALReplay { Class logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, AbstractFSWALProvider.Reader.class); - AbstractFSWALProvider.Reader reader = logReaderClass.newInstance(); + AbstractFSWALProvider.Reader reader = logReaderClass.getDeclaredConstructor().newInstance(); reader.init(this.fs, editFile, conf, stream); final long headerLength = stream.getPos(); reader.close(); @@ -1108,7 +1106,7 @@ public abstract class AbstractTestWALReplay { // Flusher used in this test. Keep count of how often we are called and // actually run the flush inside here. 
- class TestFlusher implements FlushRequester { + static class TestFlusher implements FlushRequester { private HRegion r; @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java index 2aebf2b032..5a6137056e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedLogWriter.java @@ -39,7 +39,7 @@ public class InstrumentedLogWriter extends ProtobufLogWriter { public void append(Entry entry) throws IOException { super.append(entry); if (activateFailure && - Bytes.equals(entry.getKey().getEncodedRegionName(), "break".getBytes())) { + Bytes.equals(entry.getKey().getEncodedRegionName(), Bytes.toBytes("break"))) { System.out.println(getClass().getName() + ": I will throw an exception now..."); throw(new IOException("This exception is instrumented and should only be thrown for testing" )); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java index ed711233a6..0f5fda0be1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java @@ -220,8 +220,8 @@ public class TestNamespaceReplication extends TestReplicationBase { if (res.isEmpty()) { LOG.info("Row not available"); } else { - assertEquals(res.size(), 1); - assertArrayEquals(res.value(), val); + assertEquals(1, res.size()); + assertArrayEquals(val, res.value()); break; } Thread.sleep(SLEEP_TIME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java index bacda634e3..98b3fdade3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java @@ -582,8 +582,8 @@ public class TestPerTableCFReplication { replicatedToAll = false; break; } else { - assertEquals(res.size(), 1); - assertArrayEquals(res.value(), val); + assertEquals(1, res.size()); + assertArrayEquals(val, res.value()); } } if (replicatedToAll) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index afb975d69b..3a7a5752b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -270,7 +270,7 @@ public class TestReplicationBase { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME); } else { - assertArrayEquals(res.value(), row); + assertArrayEquals(row, res.value()); break; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java index 1675496aba..7b9dea4947 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java @@ -84,7 +84,7 @@ public class TestReplicationDisableInactivePeer extends TestReplicationBase { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME * NB_RETRIES); } else { - assertArrayEquals(res.value(), row); + assertArrayEquals(row, res.value()); return; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java index 30cd860290..283704539a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java @@ -111,6 +111,7 @@ public class TestReplicationKillRS extends TestReplicationBase { private static Thread killARegionServer(final HBaseTestingUtility utility, final long timeout, final int rs) { Thread killer = new Thread() { + @Override public void run() { try { Thread.sleep(timeout); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index cb47827404..f46a7b17b2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -209,7 +209,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME); } else { - assertArrayEquals(res.value(), row); + assertArrayEquals(row, res.value()); return; } } @@ -262,7 +262,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME * i); } else { - assertArrayEquals(res.value(), row); + assertArrayEquals(row, res.value()); break; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java index 0a602ada73..b4732771e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java @@ -277,14 +277,14 @@ public class TestReplicationSyncUpTool extends TestReplicationBase { // delete half of the rows for (int i = 0; i < NB_ROWS_IN_BATCH / 2; i++) { String rowKey = "row" + i; - Delete del = new Delete(rowKey.getBytes()); + Delete del = new Delete(Bytes.toBytes(rowKey)); list.add(del); } ht1Source.delete(list); for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { String rowKey = "row" + i; - Delete del = new Delete(rowKey.getBytes()); + Delete del = new Delete(Bytes.toBytes(rowKey)); list.add(del); } ht2Source.delete(list); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java index 98f11f7982..b2ecb67931 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java @@ -187,7 +187,7 @@ public class TestReplicationWithTags { LOG.info("Row not available"); Thread.sleep(SLEEP_TIME); } else { - 
assertArrayEquals(res.value(), ROW); + assertArrayEquals(ROW, res.value()); assertEquals(1, TestCoprocessorForTagsAtSink.tags.size()); Tag tag = TestCoprocessorForTagsAtSink.tags.get(0); assertEquals(TAG_TYPE, tag.getType()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java index df845159cc..e69d84ca67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java @@ -143,8 +143,8 @@ public class TestRegionReplicaReplicationEndpoint { assertNotNull(peerConfig); assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey( HTU.getConfiguration())); - assertEquals(peerConfig.getReplicationEndpointImpl(), - RegionReplicaReplicationEndpoint.class.getName()); + assertEquals(RegionReplicaReplicationEndpoint.class.getName(), + peerConfig.getReplicationEndpointImpl()); admin.close(); } @@ -190,8 +190,8 @@ public class TestRegionReplicaReplicationEndpoint { assertNotNull(peerConfig); assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey( HTU.getConfiguration())); - assertEquals(peerConfig.getReplicationEndpointImpl(), - RegionReplicaReplicationEndpoint.class.getName()); + assertEquals(RegionReplicaReplicationEndpoint.class.getName(), + peerConfig.getReplicationEndpointImpl()); admin.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java index d7044dcf79..375e64e698 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java @@ -31,27 +31,61 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.*; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.TestReplicationBase; -import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; - import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.Test; import org.junit.Ignore; +import org.junit.Test; import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; + @Category(MediumTests.class) @Ignore("Flaky, needs to be rewritten, see HBASE-19125") @@ -88,7 +122,7 @@ public class TestReplicator extends TestReplicationBase { // have to be replicated separately. final byte[] valueBytes = new byte[8 *1024]; for (int i = 0; i < NUM_ROWS; i++) { - htable1.put(new Put(("row"+Integer.toString(i)).getBytes()) + htable1.put(new Put(Bytes.toBytes("row"+Integer.toString(i))) .addColumn(famName, null, valueBytes) ); } @@ -140,7 +174,7 @@ public class TestReplicator extends TestReplicationBase { // have to be replicated separately. final byte[] valueBytes = new byte[8 *1024]; for (int i = 0; i < NUM_ROWS; i++) { - htable1.put(new Put(("row"+Integer.toString(i)).getBytes()) + htable1.put(new Put(Bytes.toBytes("row"+Integer.toString(i))) .addColumn(famName, null, valueBytes) ); } @@ -409,7 +443,7 @@ public class TestReplicator extends TestReplicationBase { @Override public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, - ClearRegionBlockCacheRequest request) throws ServiceException { + ClearRegionBlockCacheRequest request) throws ServiceException { return delegate.clearRegionBlockCache(controller, request); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java index 43140bc9a5..44c76f8988 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java @@ -363,7 +363,7 @@ public class TestWALEntryStream { appendToLog("foo"); entryBatch = batcher.take(); assertEquals(1, entryBatch.getNbEntries()); - assertEquals(getRow(entryBatch.getWalEntries().get(0)), "foo"); + assertEquals("foo", getRow(entryBatch.getWalEntries().get(0))); } private String getRow(WAL.Entry entry) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java index 2414e5a044..1399b21e68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java @@ -153,7 +153,7 @@ public class TestSecureIPC { UserGroupInformation ugi2 = UserGroupInformation.getCurrentUser(); // check that the login user is okay: - assertSame(ugi, ugi2); + assertSame(ugi2, ugi); assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod()); assertEquals(krbPrincipal, ugi.getUserName()); @@ -280,6 +280,7 @@ public class TestSecureIPC { final Throwable exception[] = new Throwable[1]; Collections.synchronizedList(new ArrayList()); Thread.UncaughtExceptionHandler exceptionHandler = new Thread.UncaughtExceptionHandler() { + @Override public void uncaughtException(Thread th, Throwable ex) { exception[0] = ex; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java index bfc82dbc53..d5eed3ce43 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUser.java @@ -120,6 +120,7 @@ public class TestUser { Configuration conf = HBaseConfiguration.create(); final User user = User.createUserForTesting(conf, "testuser", new String[]{"foo"}); final PrivilegedExceptionAction action = new PrivilegedExceptionAction(){ + @Override public String run() throws IOException { User u = User.getCurrent(); return u.getName(); @@ -138,6 +139,7 @@ public class TestUser { // check the exception version username = user.runAs(new PrivilegedExceptionAction(){ + @Override public String run() throws Exception { return User.getCurrent().getName(); } @@ -146,6 +148,7 @@ public class TestUser { // verify that nested contexts work user2.runAs(new PrivilegedExceptionAction(){ + @Override public Object run() throws IOException, InterruptedException{ String nestedName = user.runAs(action); assertEquals("Nest name should match nested user", "testuser", nestedName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java index 06389ab7ee..8a1af20d7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java @@ -123,6 +123,7 @@ public class TestAccessControlFilter extends SecureTestUtil { // test read READER.runAs(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); // force a new RS connection @@ -151,6 +152,7 @@ public class TestAccessControlFilter extends SecureTestUtil { // test read with qualifier filter LIMITED.runAs(new PrivilegedExceptionAction() { + @Override public Object run() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); // force a new RS connection @@ -178,6 +180,7 @@ public class TestAccessControlFilter extends SecureTestUtil { // test as user with no permission DENIED.runAs(new PrivilegedExceptionAction(){ + @Override public Object run() throws Exception { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); // force a new RS connection diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index be1b0e4332..14e94bee8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -306,12 +306,15 @@ public class TestAccessController extends SecureTestUtil { grantGlobal(TEST_UTIL, toGroupEntry(GROUP_WRITE), Permission.Action.WRITE); assertEquals(5, AccessControlLists.getTablePermissions(conf, TEST_TABLE).size()); + int size = 0; try { - assertEquals(5, AccessControlClient.getUserPermissions(systemUserConnection, - TEST_TABLE.toString()).size()); + size = AccessControlClient.getUserPermissions(systemUserConnection, TEST_TABLE.toString()) + .size(); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.getUserPermissions. 
", e); + fail("error during call of AccessControlClient.getUserPermissions."); } + assertEquals(5, size); } private static void cleanUp() throws Exception { @@ -992,7 +995,7 @@ public class TestAccessController extends SecureTestUtil { } } - public class BulkLoadHelper { + public static class BulkLoadHelper { private final FileSystem fs; private final Path loadPath; private final Configuration conf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java index 1e5ea533c9..252de3a3cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java @@ -56,11 +56,13 @@ public class TestZKSecretWatcher { private static class MockAbortable implements Abortable { private boolean abort; + @Override public void abort(String reason, Throwable e) { LOG.info("Aborting: "+reason, e); abort = true; } + @Override public boolean isAborted() { return abort; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java index 391a844785..9a249c39f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java @@ -46,11 +46,13 @@ public class TestZKSecretWatcherRefreshKeys { private static class MockAbortable implements Abortable { private boolean abort; + @Override public void abort(String reason, Throwable e) { LOG.info("Aborting: "+reason, e); abort = true; } + @Override public boolean isAborted() { return abort; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java index 9da2531379..943d78e4c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java @@ -85,6 +85,7 @@ public class TestDefaultScanLabelGeneratorStack { // Set up for the test SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { VisibilityClient.addLabels(conn, new String[] { SECRET, CONFIDENTIAL }); @@ -102,6 +103,7 @@ public class TestDefaultScanLabelGeneratorStack { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = TEST_UTIL.createTable(tableName, CF)) { @@ -123,6 +125,7 @@ public class TestDefaultScanLabelGeneratorStack { // Test that super user can see all the cells. 
SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -164,6 +167,7 @@ public class TestDefaultScanLabelGeneratorStack { }); TESTUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java index a0703fc461..c7075e35a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java @@ -82,6 +82,7 @@ public class TestEnforcingScanLabelGenerator { // Set up for the test SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { VisibilityClient.addLabels(conn, new String[] { SECRET, CONFIDENTIAL }); @@ -99,6 +100,7 @@ public class TestEnforcingScanLabelGenerator { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = TEST_UTIL.createTable(tableName, CF)) { @@ -120,6 +122,7 @@ public class TestEnforcingScanLabelGenerator { // Test that super user can see all the cells. 
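[Annotation] Most of the additions in these test hunks are @Override annotations on run() methods of anonymous PrivilegedExceptionAction and Thread subclasses, which error-prone's MissingOverride check requires. For a concrete supertype like Thread the annotation has real teeth: a misspelled run() would otherwise compile as a new, never-called method. A small sketch of the pattern:

    public class OverrideOnAnonymous {
      // Without @Override, a typo such as "public void runn()" would compile
      // silently as an unrelated method; with it, the compiler rejects any
      // signature that does not actually override Thread.run().
      static final Thread WORKER = new Thread() {
        @Override
        public void run() {
          System.out.println("working");
        }
      };

      public static void main(String[] args) throws InterruptedException {
        WORKER.start();
        WORKER.join();
      }
    }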
SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -135,6 +138,7 @@ public class TestEnforcingScanLabelGenerator { }); TESTUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 521cafe7ec..76bba48bed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -160,6 +160,7 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit InterruptedException { PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf1); Table table2 = connection.getTable(TABLE_NAME)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java index ba93d190fd..932f63e026 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java @@ -302,6 +302,7 @@ public abstract class TestVisibilityLabels { TEST_UTIL.getHBaseCluster().startRegionServer(); } Thread t1 = new Thread() { + @Override public void run() { List regionServerThreads = TEST_UTIL.getHBaseCluster() .getRegionServerThreads(); @@ -320,6 +321,7 @@ public abstract class TestVisibilityLabels { t1.start(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); Thread t = new Thread() { + @Override public void run() { try { while (!killedRS) { @@ -415,6 +417,7 @@ public abstract class TestVisibilityLabels { public void testSetAndGetUserAuths() throws Throwable { final String user = "user1"; PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { String[] auths = { SECRET, CONFIDENTIAL }; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -441,6 +444,7 @@ public abstract class TestVisibilityLabels { } action = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { GetAuthsResponse authsResponse = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -462,6 +466,7 @@ public abstract class TestVisibilityLabels { // Try doing setAuths once again and there should not be any duplicates action = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { String[] auths1 = { SECRET, CONFIDENTIAL }; GetAuthsResponse authsResponse = null; @@ -491,7 +496,7 @@ public abstract class TestVisibilityLabels { List auths = new ArrayList<>(); for (Result result : results) { Cell labelCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - Cell userAuthCell 
= result.getColumnLatestCell(LABELS_TABLE_FAMILY, user.getBytes()); + Cell userAuthCell = result.getColumnLatestCell(LABELS_TABLE_FAMILY, Bytes.toBytes(user)); if (userAuthCell != null) { auths.add(Bytes.toString(labelCell.getValueArray(), labelCell.getValueOffset(), labelCell.getValueLength())); @@ -503,6 +508,7 @@ public abstract class TestVisibilityLabels { @Test public void testClearUserAuths() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { String[] auths = { SECRET, CONFIDENTIAL, PRIVATE }; String user = "testUser"; @@ -700,32 +706,32 @@ public abstract class TestVisibilityLabels { TEST_UTIL.getAdmin().createTable(desc); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(r1); - put.addColumn(fam, qual, 3l, v1); - put.addColumn(fam, qual2, 3l, v1); - put.addColumn(fam2, qual, 3l, v1); - put.addColumn(fam2, qual2, 3l, v1); + put.addColumn(fam, qual, 3L, v1); + put.addColumn(fam, qual2, 3L, v1); + put.addColumn(fam2, qual, 3L, v1); + put.addColumn(fam2, qual2, 3L, v1); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); put = new Put(r1); - put.addColumn(fam, qual, 4l, v2); - put.addColumn(fam, qual2, 4l, v2); - put.addColumn(fam2, qual, 4l, v2); - put.addColumn(fam2, qual2, 4l, v2); + put.addColumn(fam, qual, 4L, v2); + put.addColumn(fam, qual2, 4L, v2); + put.addColumn(fam2, qual, 4L, v2); + put.addColumn(fam2, qual2, 4L, v2); put.setCellVisibility(new CellVisibility(PRIVATE)); table.put(put); put = new Put(r2); - put.addColumn(fam, qual, 3l, v1); - put.addColumn(fam, qual2, 3l, v1); - put.addColumn(fam2, qual, 3l, v1); - put.addColumn(fam2, qual2, 3l, v1); + put.addColumn(fam, qual, 3L, v1); + put.addColumn(fam, qual2, 3L, v1); + put.addColumn(fam2, qual, 3L, v1); + put.addColumn(fam2, qual2, 3L, v1); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); put = new Put(r2); - put.addColumn(fam, qual, 4l, v2); - put.addColumn(fam, qual2, 4l, v2); - put.addColumn(fam2, qual, 4l, v2); - put.addColumn(fam2, qual2, 4l, v2); + put.addColumn(fam, qual, 4L, v2); + put.addColumn(fam, qual2, 4L, v2); + put.addColumn(fam2, qual, 4L, v2); + put.addColumn(fam2, qual2, 4L, v2); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); @@ -860,6 +866,7 @@ public abstract class TestVisibilityLabels { public static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, COPYRIGHT, ACCENT, UNICODE_VIS_TAG, UC1, UC2 }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java index 843ca99386..c14438ec5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.client.Table; public class TestVisibilityLabelsOnNewVersionBehaviorTable extends TestVisibilityLabelsWithDeletes { + @Override protected Table createTable(HColumnDescriptor fam) throws IOException { fam.setNewVersionBehavior(true); TableName tableName = 
TableName.valueOf(TEST_NAME.getMethodName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java index a3c926e8c0..8d75156a05 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java @@ -85,6 +85,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL { public void testLabelsTableOpsWithDifferentUsers() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user1"); @@ -99,6 +100,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL { // Ideally this should not be allowed. this operation should fail or do nothing. action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user3"); @@ -115,6 +117,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL { PrivilegedExceptionAction action1 = new PrivilegedExceptionAction() { + @Override public GetAuthsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.getAuths(conn, "user1"); @@ -138,6 +141,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL { PrivilegedExceptionAction action2 = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.clearAuths(conn, new String[] { @@ -162,6 +166,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL { private static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { SECRET, CONFIDENTIAL, PRIVATE }; try (Connection conn = ConnectionFactory.createConnection(conf)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index 072a385e99..dce8591ba4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -301,6 +301,7 @@ public class TestVisibilityLabelsReplication { final boolean nullExpected, final String... 
auths) throws IOException, InterruptedException { PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf1); Table table2 = connection.getTable(TABLE_NAME)) { @@ -346,6 +347,7 @@ public class TestVisibilityLabelsReplication { public static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE, UNICODE_VIS_TAG }; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -362,6 +364,7 @@ public class TestVisibilityLabelsReplication { public static void setAuths(final Configuration conf) throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.setAuths(conn, new String[] { SECRET, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java index f6ff640c48..ef1ae98dbe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java @@ -122,6 +122,7 @@ public class TestVisibilityLabelsWithACL { SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null, Permission.Action.READ); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); @@ -151,6 +152,7 @@ public class TestVisibilityLabelsWithACL { final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); @@ -175,6 +177,7 @@ public class TestVisibilityLabelsWithACL { final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" 
+ PRIVATE); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { Get g = new Get(row1); g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); @@ -204,6 +207,7 @@ public class TestVisibilityLabelsWithACL { SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null, Permission.Action.READ); PrivilegedExceptionAction getAction = new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { Get g = new Get(row1); g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); @@ -222,6 +226,7 @@ public class TestVisibilityLabelsWithACL { public void testLabelsTableOpsWithDifferentUsers() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.addLabels(conn, new String[] { "l1", "l2" }); @@ -237,6 +242,7 @@ public class TestVisibilityLabelsWithACL { .getResult(1).getException().getName()); action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user1"); @@ -252,6 +258,7 @@ public class TestVisibilityLabelsWithACL { .getResult(1).getException().getName()); action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.setAuths(conn, new String[] { CONFIDENTIAL, PRIVATE }, "user1"); @@ -265,6 +272,7 @@ public class TestVisibilityLabelsWithACL { assertTrue(response.getResult(1).getException().getValue().isEmpty()); action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.clearAuths(conn, new String[] { @@ -289,6 +297,7 @@ public class TestVisibilityLabelsWithACL { "user3"); PrivilegedExceptionAction action1 = new PrivilegedExceptionAction() { + @Override public GetAuthsResponse run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { return VisibilityClient.getAuths(conn, "user3"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java index 2d3f607d50..ea1ed100ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java @@ -56,6 +56,7 @@ public class TestVisibilityLabelsWithCustomVisLabService extends TestVisibilityL } // Extending this test from super as we don't verify predefined labels in ExpAsStringVisibilityLabelServiceImpl + @Override @Test public void testVisibilityLabelsInPutsThatDoesNotMatchAnyDefinedLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); @@ -63,6 +64,7 @@ public class TestVisibilityLabelsWithCustomVisLabService extends TestVisibilityL createTableAndWriteDataWithLabels(tableName, "SAMPLE_LABEL", "TEST"); } + 
@Override protected List extractAuths(String user, List results) { List auths = new ArrayList<>(); for (Result result : results) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java index 189b37f935..2adcf9efec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java @@ -82,6 +82,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili public void testAddLabels() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { "L1", SECRET, "L2", "invalid~", "L3" }; VisibilityLabelsResponse response = null; @@ -122,6 +123,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili do { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, "ABC", "XYZ" }; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -170,6 +172,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili public void testListLabels() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public ListLabelsResponse run() throws Exception { ListLabelsResponse response = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -200,6 +203,7 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili public void testListLabelsWithRegEx() throws Throwable { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public ListLabelsResponse run() throws Exception { ListLabelsResponse response = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java index 0a7d918fd4..9d536fe2b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java @@ -17,6 +17,17 @@ */ package org.apache.hadoop.hbase.security.visibility; +import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; @@ -42,14 +53,10 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; 
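[Annotation] The import hunk below reorders TestVisibilityLabelsWithDeletes to the usual checkstyle grouping (static imports first, then java.*, then other libraries) and drops imports the class no longer uses (DefaultEnvironmentEdge, EnvironmentEdge, Threads, Ignore). An illustrative compilation unit showing the grouping, with hypothetical class and method names:

    import static org.junit.Assert.assertEquals;  // static imports first

    import java.io.IOException;                   // then the java.* groups
    import java.util.ArrayList;
    import java.util.List;

    import org.junit.Test;                        // then other libraries

    class ImportOrderExample {
      @Test
      public void ordered() throws IOException {
        List<Integer> xs = new ArrayList<>();
        assertEquals(0, xs.size());
      }
    }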
-import org.apache.hadoop.hbase.util.DefaultEnvironmentEdge; -import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -57,17 +64,6 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - /** * Tests visibility labels with deletes */ @@ -205,7 +201,7 @@ public class TestVisibilityLabelsWithDeletes { public void testVisibilityLabelsWithDeleteFamilyVersion() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - long[] ts = new long[] { 123l, 125l }; + long[] ts = new long[] { 123L, 125L }; try (Table table = createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @@ -215,7 +211,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); - d.addFamilyVersion(fam, 123l); + d.addFamilyVersion(fam, 123L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -243,7 +239,7 @@ public class TestVisibilityLabelsWithDeletes { public void testVisibilityLabelsWithDeleteColumnExactVersion() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - long[] ts = new long[] { 123l, 125l }; + long[] ts = new long[] { 123L, 125L }; try (Table table = createTableAndWriteDataWithLabels(ts, CONFIDENTIAL + "|" + TOPSECRET, SECRET);) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @@ -253,7 +249,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); - d.addColumn(fam, qual, 123l); + d.addColumn(fam, qual, 123L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -291,7 +287,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET+")")); - d.addColumns(fam, qual, 125l); + d.addColumns(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -313,17 +309,17 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, 
current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -508,7 +504,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -529,7 +525,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -566,7 +562,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -586,7 +582,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -622,7 +618,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -643,7 +639,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -680,7 +676,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -700,7 +696,7 @@ public class TestVisibilityLabelsWithDeletes { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -737,7 +733,7 @@ public class TestVisibilityLabelsWithDeletes { ResultScanner scanner = table.getScanner(s); // The delete would not be able to apply it because of visibility mismatch Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -759,7 +755,7 @@ public class TestVisibilityLabelsWithDeletes { ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); // this will alone match - assertEquals(next.length, 0); + assertEquals(0, next.length); } catch (Throwable t) { throw new IOException(t); } @@ -810,7 +806,7 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); put = new 
Put(Bytes.toBytes("row1")); put.addColumn(fam, qual, value1); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); @@ -836,13 +832,13 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(CONFIDENTIAL)); scanner = table.getScanner(s); next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); s = new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET)); scanner = table.getScanner(s); Result[] next1 = scanner.next(3); - assertEquals(next1.length, 0); + assertEquals(0, next1.length); } } @@ -886,7 +882,7 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); put = new Put(Bytes.toBytes("row1")); put.addColumn(fam, qual, value1); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); @@ -912,13 +908,13 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(CONFIDENTIAL)); scanner = table.getScanner(s); next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); s = new Scan(); s.setMaxVersions(5); s.setAuthorizations(new Authorizations(SECRET)); scanner = table.getScanner(s); Result[] next1 = scanner.next(3); - assertEquals(next1.length, 0); + assertEquals(0, next1.length); } } @@ -933,11 +929,11 @@ public class TestVisibilityLabelsWithDeletes { hBaseAdmin.createTable(desc); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 123l, value); + put.addColumn(fam, qual, 123L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 124l, value1); + put.addColumn(fam, qual, 124L, value1); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @@ -947,7 +943,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); - d.addColumns(fam, qual, 126l); + d.addColumns(fam, qual, 126L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -957,7 +953,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); - d.addColumn(fam, qual, 123l); + d.addColumn(fam, qual, 123L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -971,7 +967,7 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 0); + assertEquals(0, next.length); } } @Test @@ -987,11 +983,11 @@ public class TestVisibilityLabelsWithDeletes { try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put1 = new Put(Bytes.toBytes("row1")); - put1.addColumn(fam, qual, 123l, value); + put1.addColumn(fam, qual, 123L, value); put1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); Put put2 = new Put(Bytes.toBytes("row1")); - put2.addColumn(fam, qual, 123l, value1); + put2.addColumn(fam, qual, 123L, value1); put2.setCellVisibility(new CellVisibility(SECRET)); table.put(createList(put1, put2)); @@ -1000,7 +996,7 @@ 
public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET)); ResultScanner scanner = table.getScanner(s); - assertEquals(scanner.next(3).length, 1); + assertEquals(1, scanner.next(3).length); scanner.close(); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @@ -1010,7 +1006,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); - d.addColumn(fam, qual, 123l); + d.addColumn(fam, qual, 123L); table.delete(d); } @@ -1018,7 +1014,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); - d.addColumn(fam, qual, 123l); + d.addColumn(fam, qual, 123L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -1031,7 +1027,7 @@ public class TestVisibilityLabelsWithDeletes { s.setMaxVersions(5); s.setAuthorizations(new Authorizations(CONFIDENTIAL)); scanner = table.getScanner(s); - assertEquals(scanner.next(3).length, 0); + assertEquals(0, scanner.next(3).length); scanner.close(); } } @@ -1126,22 +1122,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1162,36 +1158,36 @@ public class TestVisibilityLabelsWithDeletes { List puts = new ArrayList<>(5); Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 123l, value); + put.addColumn(fam, qual, 123L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 124l, value); + put.addColumn(fam, qual, 124L, value); put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 125l, value); + put.addColumn(fam, qual, 125L, value); put.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 126l, value); + put.addColumn(fam, qual, 126L, value); put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 127l, value); + put.addColumn(fam, qual, 
127L, value); put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); puts.add(put); TEST_UTIL.getAdmin().flush(tableName); put = new Put(Bytes.toBytes("row2")); - put.addColumn(fam, qual, 127l, value); + put.addColumn(fam, qual, 127L, value); put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); puts.add(put); @@ -1212,28 +1208,28 @@ public class TestVisibilityLabelsWithDeletes { List puts = new ArrayList<>(5); Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 123l, value); + put.addColumn(fam, qual, 123L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 124l, value); + put.addColumn(fam, qual, 124L, value); put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 125l, value); + put.addColumn(fam, qual, 125L, value); put.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual1, 126l, value); + put.addColumn(fam, qual1, 126L, value); put.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual2, 127l, value); + put.addColumn(fam, qual2, 127L, value); put.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); puts.add(put); @@ -1253,23 +1249,23 @@ public class TestVisibilityLabelsWithDeletes { hBaseAdmin.createTable(desc); List puts = new ArrayList<>(5); Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 123l, value); + put.addColumn(fam, qual, 123L, value); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 124l, value); + put.addColumn(fam, qual, 124L, value); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 125l, value); + put.addColumn(fam, qual, 125L, value); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 126l, value); + put.addColumn(fam, qual, 126L, value); puts.add(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 127l, value); + put.addColumn(fam, qual, 127L, value); puts.add(put); Table table = TEST_UTIL.getConnection().getTable(tableName); @@ -1278,7 +1274,7 @@ public class TestVisibilityLabelsWithDeletes { TEST_UTIL.getAdmin().flush(tableName); put = new Put(Bytes.toBytes("row2")); - put.addColumn(fam, qual, 127l, value); + put.addColumn(fam, qual, 127L, value); table.put(put); return table; @@ -1300,7 +1296,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET+")")); - d.addColumn(fam, qual, 125l); + d.addColumn(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -1322,27 +1318,27 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); 
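[Annotation] The bulk of these hunks replace the lowercase long-literal suffix (123l) with the uppercase form (123L). The lowercase letter l is visually near-identical to the digit 1 in many fonts, so 123l is easy to misread as the four-digit literal 1231; both forms compile to the same value, and only the readability differs:

    public class LongSuffixExample {
      static final long BAD_STYLE = 123l;  // flagged: reads like "1231"
      static final long GOOD_STYLE = 123L; // unambiguous at a glance

      public static void main(String[] args) {
        System.out.println(BAD_STYLE == GOOD_STYLE); // true; style-only change
      }
    }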
assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1386,22 +1382,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1417,7 +1413,7 @@ public class TestVisibilityLabelsWithDeletes { try (Table table = doPuts(tableName)) { TEST_UTIL.getAdmin().flush(tableName); Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 128l, value); + put.addColumn(fam, qual, 128L, value); put.setCellVisibility(new CellVisibility(TOPSECRET)); table.put(put); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @@ -1449,27 +1445,27 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 128l); + assertEquals(128L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + 
assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1477,7 +1473,7 @@ public class TestVisibilityLabelsWithDeletes { current.getRowLength(), row2, 0, row2.length)); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 129l, value); + put.addColumn(fam, qual, 129L, value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); @@ -1493,7 +1489,7 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 129l); + assertEquals(129L, current.getTimestamp()); } } @Test @@ -1521,7 +1517,7 @@ public class TestVisibilityLabelsWithDeletes { SUPERUSER.runAs(actiona); TEST_UTIL.getAdmin().flush(tableName); Put put = new Put(Bytes.toBytes("row3")); - put.addColumn(fam, qual, 127l, value); + put.addColumn(fam, qual, 127L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE)); table.put(put); TEST_UTIL.getAdmin().flush(tableName); @@ -1539,22 +1535,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1598,12 +1594,12 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = 
cellScanner.current(); @@ -1623,7 +1619,7 @@ public class TestVisibilityLabelsWithDeletes { public Void run() throws Exception { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addColumns(fam, qual, 125l); + d.addColumns(fam, qual, 125L); try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { table.delete(d); @@ -1647,24 +1643,24 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), qual1, 0, qual1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), qual2, 0, qual2.length)); } @@ -1681,11 +1677,11 @@ public class TestVisibilityLabelsWithDeletes { hBaseAdmin.createTable(desc); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual1, 125l, value); + put.addColumn(fam, qual1, 125L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual1, 126l, value); + put.addColumn(fam, qual1, 126L, value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); TEST_UTIL.getAdmin().flush(tableName); @@ -1694,11 +1690,11 @@ public class TestVisibilityLabelsWithDeletes { public Void run() throws Exception { Delete d1 = new Delete(row1); d1.setCellVisibility(new CellVisibility(SECRET)); - d1.addColumns(fam, qual, 126l); + d1.addColumns(fam, qual, 126L); Delete d2 = new Delete(row1); d2.setCellVisibility(new CellVisibility(CONFIDENTIAL)); - d2.addColumns(fam, qual1, 125l); + d2.addColumns(fam, qual1, 125L); try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -1715,7 +1711,7 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); } } @Test @@ -1729,11 +1725,11 @@ public class TestVisibilityLabelsWithDeletes { hBaseAdmin.createTable(desc); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual1, 125l, value); + put.addColumn(fam, qual1, 125L, value); 
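[Annotation] The recurring assertEquals swaps in these hunks, such as assertEquals(next.length, 1) becoming assertEquals(1, next.length), fix the JUnit convention that the expected value comes first. With the arguments reversed the test still passes or fails identically, but a failure message reports the observed value as the "expected" one, which misleads whoever reads the output. A minimal illustration:

    import static org.junit.Assert.assertEquals;

    public class AssertOrderExample {
      public static void main(String[] args) {
        int actualLength = 2;
        // Reversed order: on failure JUnit would print
        // "expected:<2> but was:<1>", presenting the observed length
        // as if it were the expectation.
        // assertEquals(actualLength, 1);

        // Conventional order: expected first, actual second.
        assertEquals(2, actualLength);
      }
    }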
put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual1, 126l, value); + put.addColumn(fam, qual1, 126L, value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); TEST_UTIL.getAdmin().flush(tableName); @@ -1742,11 +1738,11 @@ public class TestVisibilityLabelsWithDeletes { public Void run() throws Exception { Delete d1 = new Delete(row1); d1.setCellVisibility(new CellVisibility(SECRET)); - d1.addColumns(fam, qual, 126l); + d1.addColumns(fam, qual, 126L); Delete d2 = new Delete(row1); d2.setCellVisibility(new CellVisibility(CONFIDENTIAL)); - d2.addColumns(fam, qual1, 126l); + d2.addColumns(fam, qual1, 126L); try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -1763,7 +1759,7 @@ public class TestVisibilityLabelsWithDeletes { s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); - assertEquals(next.length, 1); + assertEquals(1, next.length); } } @Test @@ -1838,27 +1834,27 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1881,7 +1877,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")")); - d.addFamily(fam, 126l); + d.addFamily(fam, 126L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -1903,17 +1899,17 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = 
cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1936,7 +1932,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET+")")); - d.addFamily(fam, 126l); + d.addFamily(fam, 126L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -1948,7 +1944,7 @@ public class TestVisibilityLabelsWithDeletes { TEST_UTIL.getAdmin().flush(tableName); Put put = new Put(Bytes.toBytes("row3")); - put.addColumn(fam, qual, 127l, value); + put.addColumn(fam, qual, 127L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "&" + PRIVATE)); table.put(put); TEST_UTIL.getAdmin().flush(tableName); @@ -1966,7 +1962,7 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -1989,7 +1985,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addFamily(fam, 125l); + d.addFamily(fam, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2010,22 +2006,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2041,7 +2037,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addFamily(fam, 127l); + d.addFamily(fam, 127L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2061,18 +2057,18 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - 
assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); } } @@ -2100,11 +2096,11 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); - d.addFamilyVersion(fam, 123l); + d.addFamilyVersion(fam, 123L); table.delete(d); d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addFamilyVersion(fam, 125l); + d.addFamilyVersion(fam, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2126,17 +2122,17 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); } } @@ -2153,11 +2149,11 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); - d.addColumn(fam, qual, 126l); + d.addColumn(fam, qual, 126L); table.delete(d); d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addFamilyVersion(fam, 125l); + d.addFamilyVersion(fam, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2179,17 +2175,17 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); // Issue 2nd delete actiona = new PrivilegedExceptionAction() { @Override @@ -2218,12 +2214,12 @@ public class TestVisibilityLabelsWithDeletes { 
current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); } } @@ -2258,7 +2254,7 @@ public class TestVisibilityLabelsWithDeletes { d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addFamilyVersion(fam, 125l); + d.addFamilyVersion(fam, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2280,17 +2276,17 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); // Issue 2nd delete actiona = new PrivilegedExceptionAction() { @Override @@ -2319,12 +2315,12 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); } } @@ -2340,7 +2336,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addColumn(fam, qual, 125l); + d.addColumn(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2361,22 +2357,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = 
cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2392,7 +2388,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addColumn(fam, qual, 127l); + d.addColumn(fam, qual, 127L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2412,23 +2408,23 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); } } @@ -2446,7 +2442,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")" + "|(" + TOPSECRET + "&" + SECRET + ")")); - d.addColumn(fam, qual, 127l); + d.addColumn(fam, qual, 127L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2467,22 +2463,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2497,7 +2493,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); 
d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addColumn(fam, qual, 127l); + d.addColumn(fam, qual, 127L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2517,28 +2513,28 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); } } @Test @@ -2556,7 +2552,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addColumn(fam, qual, 125l); + d.addColumn(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2577,27 +2573,27 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2613,7 +2609,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); 
d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addColumn(fam, qual, 127l); + d.addColumn(fam, qual, 127L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2633,28 +2629,28 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); } } @Test @@ -2671,7 +2667,7 @@ public class TestVisibilityLabelsWithDeletes { Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); - d.addColumn(fam, qual, 125l); + d.addColumn(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2692,22 +2688,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2723,7 +2719,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addFamily(fam, 124l); + d.addFamily(fam, 124L); table.delete(d); } catch (Throwable t) { throw new 
IOException(t); @@ -2743,18 +2739,18 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); } } @@ -2789,7 +2785,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addColumns(fam, qual, 125l); + d.addColumns(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2810,22 +2806,22 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2841,7 +2837,7 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); - d.addColumn(fam, qual, 127l); + d.addColumn(fam, qual, 127L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2861,17 +2857,17 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, 
row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -2892,7 +2888,7 @@ public class TestVisibilityLabelsWithDeletes { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); - d.addColumn(fam, qual, 125l); + d.addColumn(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2916,7 +2912,7 @@ public class TestVisibilityLabelsWithDeletes { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); - d.addColumns(fam, qual, 125l); + d.addColumns(fam, qual, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -2941,7 +2937,7 @@ public class TestVisibilityLabelsWithDeletes { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); - d.addFamily(fam, 125l); + d.addFamily(fam, 125L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -3016,7 +3012,7 @@ public class TestVisibilityLabelsWithDeletes { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); - d.addFamilyVersion(fam, 126l); + d.addFamilyVersion(fam, 126L); table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -3043,27 +3039,27 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 127l); + assertEquals(127L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 126l); + assertEquals(126L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 125l); + assertEquals(125L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); cellScanner = next[1].cellScanner(); cellScanner.advance(); current = cellScanner.current(); @@ -3083,11 +3079,11 @@ public class TestVisibilityLabelsWithDeletes { hBaseAdmin.createTable(desc); try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 123l, value); + put.addColumn(fam, qual, 123L, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put = new Put(Bytes.toBytes("row1")); - put.addColumn(fam, qual, 124l, value); + put.addColumn(fam, qual, 124L, value); 
put.setCellVisibility(new CellVisibility(CONFIDENTIAL + "|" + PRIVATE)); table.put(put); TEST_UTIL.getAdmin().flush(tableName); @@ -3097,7 +3093,7 @@ public class TestVisibilityLabelsWithDeletes { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); - d.addColumn(fam, qual, 124l); + d.addColumn(fam, qual, 124L); d.setCellVisibility(new CellVisibility(PRIVATE )); table.delete(d); } catch (Throwable t) { @@ -3120,12 +3116,12 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 124l); + assertEquals(124L, current.getTimestamp()); cellScanner.advance(); current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - assertEquals(current.getTimestamp(), 123l); + assertEquals(123L, current.getTimestamp()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java index b6a1c6d79a..b82d5036f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java @@ -107,6 +107,7 @@ public class TestVisibilityLabelsWithSLGStack { private static void addLabels() throws Exception { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + @Override public VisibilityLabelsResponse run() throws Exception { String[] labels = { SECRET, CONFIDENTIAL }; try (Connection conn = ConnectionFactory.createConnection(conf)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java index 9f24f6c8c9..906b9d5d75 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java @@ -96,6 +96,7 @@ public class TestVisibilityLablesWithGroups { // Set up for the test SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf)) { VisibilityClient.addLabels(conn, new String[] { SECRET, CONFIDENTIAL }); @@ -116,6 +117,7 @@ public class TestVisibilityLablesWithGroups { TEST_UTIL.createTable(tableName, CF); // put the data. SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -137,6 +139,7 @@ public class TestVisibilityLablesWithGroups { // 'admin' user is part of 'supergroup', thus can see all the cells. 
SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -178,6 +181,7 @@ public class TestVisibilityLablesWithGroups { // Get testgroup's labels. SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { GetAuthsResponse authsResponse = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -197,6 +201,7 @@ public class TestVisibilityLablesWithGroups { // Test that test user can see what 'testgroup' has been authorized to. TESTUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { @@ -281,6 +286,7 @@ public class TestVisibilityLablesWithGroups { // Clear 'testgroup' of CONFIDENTIAL label. SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { VisibilityLabelsResponse response = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -295,6 +301,7 @@ public class TestVisibilityLablesWithGroups { // Get testgroup's labels. No label is returned. SUPERUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { GetAuthsResponse authsResponse = null; try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -313,6 +320,7 @@ public class TestVisibilityLablesWithGroups { // Test that test user cannot see the cells with the labels anymore. TESTUSER.runAs(new PrivilegedExceptionAction() { + @Override public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java index fa88a53df4..ebf38a52b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java @@ -136,7 +136,7 @@ public class TestVisibilityWithCheckAuths { Table table = connection.getTable(tableName)) { Put p = new Put(row1); p.setCellVisibility(new CellVisibility(PUBLIC + "&" + TOPSECRET)); - p.addColumn(fam, qual, 125l, value); + p.addColumn(fam, qual, 125L, value); table.put(p); Assert.fail("Testcase should fail with AccesDeniedException"); } catch (Throwable t) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java index 43ba304102..7a55584427 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java @@ -209,25 +209,25 @@ public class TestWithDisabledAuthorization { s.setAuthorizations(new Authorizations()); try (ResultScanner scanner = t.getScanner(s)) { Result[] next = scanner.next(10); - assertEquals(next.length, 4); + assertEquals(4, next.length); } s = new Scan(); s.setAuthorizations(new Authorizations(SECRET)); try (ResultScanner 
scanner = t.getScanner(s)) { Result[] next = scanner.next(10); - assertEquals(next.length, 4); + assertEquals(4, next.length); } s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); try (ResultScanner scanner = t.getScanner(s)) { Result[] next = scanner.next(10); - assertEquals(next.length, 4); + assertEquals(4, next.length); } s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE)); try (ResultScanner scanner = t.getScanner(s)) { Result[] next = scanner.next(10); - assertEquals(next.length, 4); + assertEquals(4, next.length); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index a2c015c6b5..9b0209652e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -313,6 +313,7 @@ public class TestFlushSnapshotFromClient { // Merge two regions List regions = admin.getTableRegions(TABLE_NAME); Collections.sort(regions, new Comparator() { + @Override public int compare(HRegionInfo r1, HRegionInfo r2) { return Bytes.compareTo(r1.getStartKey(), r2.getStartKey()); } @@ -354,6 +355,7 @@ public class TestFlushSnapshotFromClient { // Merge two regions List regions = admin.getTableRegions(TABLE_NAME); Collections.sort(regions, new Comparator() { + @Override public int compare(HRegionInfo r1, HRegionInfo r2) { return Bytes.compareTo(r1.getStartKey(), r2.getStartKey()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java index 3f7d441065..07fdcd2861 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/MapreduceTestingShim.java @@ -77,6 +77,7 @@ abstract public class MapreduceTestingShim { } private static class MapreduceV1Shim extends MapreduceTestingShim { + @Override public JobContext newJobContext(Configuration jobConf) throws IOException { // Implementing: // return new JobContext(jobConf, new JobID()); @@ -105,6 +106,7 @@ abstract public class MapreduceTestingShim { } } + @Override public JobConf obtainJobConf(MiniMRCluster cluster) { if (cluster == null) return null; try { @@ -129,6 +131,7 @@ abstract public class MapreduceTestingShim { }; private static class MapreduceV2Shim extends MapreduceTestingShim { + @Override public JobContext newJobContext(Configuration jobConf) { return newJob(jobConf); } @@ -147,6 +150,7 @@ abstract public class MapreduceTestingShim { } } + @Override public JobConf obtainJobConf(MiniMRCluster cluster) { try { Method meth = MiniMRCluster.class.getMethod("getJobTrackerConf", emptyParam); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java index 8b1c96ed2b..a3ca3230f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java @@ -681,7 +681,7 @@ public class TestLoadIncrementalHFiles { compare[1] = "r".getBytes(); compare[2] = "u".getBytes(); - assertEquals(keysArray.length, 3); + assertEquals(3, keysArray.length); for 
(int row = 0; row < keysArray.length; row++) { assertArrayEquals(keysArray[row], compare[row]); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java index 56c9ecadf0..2d32a3c8b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java @@ -420,7 +420,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { // check that data was loaded // The three expected attempts are 1) failure because need to split, 2) // load of split top 3) load of split bottom - assertEquals(attemptedCalls.get(), 3); + assertEquals(3, attemptedCalls.get()); assertExpectedTable(table, ROWCOUNT, 2); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 5098e0b389..410dd0c81d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -101,7 +101,7 @@ public abstract class MultiThreadedAction { @Override public byte[] getDeterministicUniqueKey(long keyBase) { - return LoadTestKVGenerator.md5PrefixedKey(keyBase).getBytes(); + return Bytes.toBytes(LoadTestKVGenerator.md5PrefixedKey(keyBase)); } @Override @@ -114,7 +114,7 @@ public abstract class MultiThreadedAction { int numColumns = minColumnsPerKey + random.nextInt(maxColumnsPerKey - minColumnsPerKey + 1); byte[][] columns = new byte[numColumns][]; for (int i = 0; i < numColumns; ++i) { - columns[i] = Integer.toString(i).getBytes(); + columns[i] = Bytes.toBytes(Integer.toString(i)); } return columns; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java index 447cca870b..68643663fc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java @@ -201,8 +201,7 @@ public class MultiThreadedReader extends MultiThreadedAction "to read " + k + " is out of range (startKey=" + startKey + ", endKey=" + endKey + ")"); } - if (k % numThreads != readerId || - writer != null && writer.failedToWriteKey(k)) { + if (k % numThreads != readerId || (writer != null && writer.failedToWriteKey(k))) { // Skip keys that this thread should not read, as well as the keys // that we know the writer failed to write. 
continue; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java index 7112d50799..7746bea2ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedPriorityBlockingQueue.java @@ -42,7 +42,7 @@ import org.junit.experimental.categories.Category; public class TestBoundedPriorityBlockingQueue { private final static int CAPACITY = 16; - class TestObject { + static class TestObject { private final int priority; private final int seqId; @@ -60,7 +60,7 @@ public class TestBoundedPriorityBlockingQueue { } } - class TestObjectComparator implements Comparator { + static class TestObjectComparator implements Comparator { public TestObjectComparator() {} @Override @@ -208,6 +208,7 @@ public class TestBoundedPriorityBlockingQueue { final CyclicBarrier threadsStarted = new CyclicBarrier(2); ExecutorService executor = Executors.newFixedThreadPool(2); executor.execute(new Runnable() { + @Override public void run() { try { assertNull(queue.poll(1000, TimeUnit.MILLISECONDS)); @@ -221,6 +222,7 @@ public class TestBoundedPriorityBlockingQueue { }); executor.execute(new Runnable() { + @Override public void run() { try { threadsStarted.await(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java index 4c6990ec02..ecc66112d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBuffUtils.java @@ -36,7 +36,7 @@ public class TestByteBuffUtils { ByteBuffer bb2 = ByteBuffer.allocate(50); MultiByteBuff src = new MultiByteBuff(bb1, bb2); for (int i = 0; i < 7; i++) { - src.putLong(8l); + src.putLong(8L); } src.put((byte) 1); src.put((byte) 1); @@ -58,7 +58,7 @@ public class TestByteBuffUtils { bb3 = ByteBuffer.allocate(100); SingleByteBuff sbb = new SingleByteBuff(bb3); for (int i = 0; i < 7; i++) { - sbb.putLong(8l); + sbb.putLong(8L); } sbb.put((byte) 1); sbb.put((byte) 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java index 8f503e034b..a554e99693 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java @@ -161,11 +161,12 @@ public class TestFSHDFSUtils { /** * Version of DFS that has HDFS-4525 in it. */ - class IsFileClosedDistributedFileSystem extends DistributedFileSystem { + static class IsFileClosedDistributedFileSystem extends DistributedFileSystem { /** * Close status of a file. 
Copied over from HDFS-4525 * @return true if file is already closed **/ + @Override public boolean isFileClosed(Path f) throws IOException{ return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java index cb23a0b350..be302d5981 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java @@ -77,6 +77,7 @@ public class TestFSVisitor { final Set families = new HashSet<>(); final Set hfiles = new HashSet<>(); FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() { + @Override public void storeFile(final String region, final String family, final String hfileName) throws IOException { regions.add(region); @@ -84,9 +85,9 @@ public class TestFSVisitor { hfiles.add(hfileName); } }); - assertEquals(tableRegions, regions); - assertEquals(tableFamilies, families); - assertEquals(tableHFiles, hfiles); + assertEquals(regions, tableRegions); + assertEquals(families, tableFamilies); + assertEquals(hfiles, tableHFiles); } /* diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java index 641f66eb8a..31f1909f10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java @@ -131,12 +131,12 @@ public class TestHBaseFsckEncryption { // Insure HBck doesn't consider them corrupt HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, htd.getTableName()); - assertEquals(res.getRetCode(), 0); + assertEquals(0, res.getRetCode()); HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker(); - assertEquals(hfcc.getCorrupted().size(), 0); - assertEquals(hfcc.getFailures().size(), 0); - assertEquals(hfcc.getQuarantined().size(), 0); - assertEquals(hfcc.getMissing().size(), 0); + assertEquals(0, hfcc.getCorrupted().size()); + assertEquals(0, hfcc.getFailures().size()); + assertEquals(0, hfcc.getQuarantined().size()); + assertEquals(0, hfcc.getMissing().size()); } private List findStorefilePaths(TableName tableName) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index 36612074eb..2548ecfebf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -118,18 +118,18 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck { // A corrupt mob file doesn't abort the start of regions, so we can enable the table. 
admin.enableTable(table); HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table); - assertEquals(res.getRetCode(), 0); + assertEquals(0, res.getRetCode()); HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker(); - assertEquals(hfcc.getHFilesChecked(), 4); - assertEquals(hfcc.getCorrupted().size(), 0); - assertEquals(hfcc.getFailures().size(), 0); - assertEquals(hfcc.getQuarantined().size(), 0); - assertEquals(hfcc.getMissing().size(), 0); - assertEquals(hfcc.getMobFilesChecked(), 5); - assertEquals(hfcc.getCorruptedMobFiles().size(), 1); - assertEquals(hfcc.getFailureMobFiles().size(), 0); - assertEquals(hfcc.getQuarantinedMobFiles().size(), 1); - assertEquals(hfcc.getMissedMobFiles().size(), 0); + assertEquals(4, hfcc.getHFilesChecked()); + assertEquals(0, hfcc.getCorrupted().size()); + assertEquals(0, hfcc.getFailures().size()); + assertEquals(0, hfcc.getQuarantined().size()); + assertEquals(0, hfcc.getMissing().size()); + assertEquals(5, hfcc.getMobFilesChecked()); + assertEquals(1, hfcc.getCorruptedMobFiles().size()); + assertEquals(0, hfcc.getFailureMobFiles().size()); + assertEquals(1, hfcc.getQuarantinedMobFiles().size()); + assertEquals(0, hfcc.getMissedMobFiles().size()); String quarantinedMobFile = hfcc.getQuarantinedMobFiles().iterator().next().getName(); assertEquals(corruptMobFile, quarantinedMobFile); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java index 13c6df5d1e..bb68898171 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestJSONMetricUtil.java @@ -86,7 +86,7 @@ public class TestJSONMetricUtil { Hashtable properties = JSONMetricUtil.buldKeyValueTable(keys, values); ObjectName testObject = JSONMetricUtil.buildObjectName(JSONMetricUtil.JAVA_LANG_DOMAIN, properties); - assertEquals(testObject.getDomain(), JSONMetricUtil.JAVA_LANG_DOMAIN); + assertEquals(JSONMetricUtil.JAVA_LANG_DOMAIN, testObject.getDomain()); assertEquals(testObject.getKeyPropertyList(), properties); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java index 7b1cd2df9a..e6b05e9a73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadParallel.java @@ -40,6 +40,7 @@ public class TestMiniClusterLoadParallel super(isMultiPut, encoding); } + @Override @Test(timeout=TIMEOUT_MS) public void loadTest() throws Exception { prepareForLoadTest(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java index fd86bebbfc..eee3030c33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java @@ -68,6 +68,7 @@ public class TestRegionSplitCalculator { return end; } + @Override public String toString() { return "[" + Bytes.toString(start) + ", " + Bytes.toString(end) + "]"; } @@ -135,8 +136,7 @@ public class TestRegionSplitCalculator { LOG.info("Standard"); String res = dump(sc.getSplits(), regions); checkDepths(sc.getSplits(), regions, 1, 1, 
1, 0); - assertEquals(res, "A:\t[A, B]\t\n" + "B:\t[B, C]\t\n" + "C:\t[C, D]\t\n" - + "D:\t\n"); + assertEquals("A:\t[A, B]\t\n" + "B:\t[B, C]\t\n" + "C:\t[C, D]\t\nD:\t\n", res); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java index 365070628e..1aab1f8231 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java @@ -20,10 +20,11 @@ import java.io.IOException; import java.util.Random; import java.util.Set; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.util.LoadTestKVGenerator; +import org.apache.yetus.audience.InterfaceAudience; /** * A generator of random data (keys/cfs/columns/values) for load testing. @@ -35,11 +36,11 @@ public abstract class LoadTestDataGenerator { // The mutate info column stores information // about update done to this column family this row. - public final static byte[] MUTATE_INFO = "mutate_info".getBytes(); + public final static byte[] MUTATE_INFO = Bytes.toBytes("mutate_info"); // The increment column always has a long value, // which can be incremented later on during updates. - public final static byte[] INCREMENT = "increment".getBytes(); + public final static byte[] INCREMENT = Bytes.toBytes("increment"); protected String[] args; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index f0f35e75de..ceb43d5ea3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -476,7 +476,7 @@ public class TestWALFactory { reader.close(); // Reset the lease period - setLeasePeriod.invoke(cluster, new Object[]{new Long(60000), new Long(3600000)}); + setLeasePeriod.invoke(cluster, new Object[]{ 60000L, 3600000L }); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index a1206aa775..dded5062ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -131,9 +131,9 @@ public class TestWALSplit { private static final String FILENAME_BEING_SPLIT = "testfile"; private static final TableName TABLE_NAME = TableName.valueOf("t1"); - private static final byte[] FAMILY = "f1".getBytes(); - private static final byte[] QUALIFIER = "q1".getBytes(); - private static final byte[] VALUE = "v1".getBytes(); + private static final byte[] FAMILY = Bytes.toBytes("f1"); + private static final byte[] QUALIFIER = Bytes.toBytes("q1"); + private static final byte[] VALUE = Bytes.toBytes("v1"); private static final String WAL_FILE_PREFIX = "wal.dat."; private static List REGIONS = new ArrayList<>(); private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors"; @@ -341,7 +341,7 @@ public class TestWALSplit { while (!stop.get()) { try { long seq = appendEntry(writer, TABLE_NAME, regionBytes, - ("r" + editsCount.get()).getBytes(), regionBytes, QUALIFIER, VALUE, 0); + Bytes.toBytes("r" + 
editsCount.get()), regionBytes, QUALIFIER, VALUE, 0); long count = editsCount.incrementAndGet(); LOG.info(getName() + " sync count=" + count + ", seq=" + seq); try { @@ -407,7 +407,7 @@ public class TestWALSplit { TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit()); Path parent = WALSplitter.getRegionDirRecoveredEditsDir(regiondir); - assertEquals(parent.getName(), HConstants.RECOVERED_EDITS_DIR); + assertEquals(HConstants.RECOVERED_EDITS_DIR, parent.getName()); fs.createNewFile(parent); // create a recovered.edits file Path p = WALSplitter.getRegionSplitEditsPath(fs, entry, HBASEDIR, @@ -588,8 +588,8 @@ public class TestWALSplit { archivedLogs.add(log.getPath().getName()); } LOG.debug(archived.toString()); - assertEquals(failureType.name() + ": expected to find all of our wals corrupt.", - walDirContents, archivedLogs); + assertEquals(failureType.name() + ": expected to find all of our wals corrupt.", archivedLogs, + walDirContents); } } @@ -687,7 +687,7 @@ public class TestWALSplit { // should not have stored the EOF files as corrupt FileStatus[] archivedLogs = fs.listStatus(CORRUPTDIR); - assertEquals(archivedLogs.length, 0); + assertEquals(0, archivedLogs.length); } @@ -749,7 +749,7 @@ public class TestWALSplit { InstrumentedLogWriter.activateFailure = false; appendEntry(writer, TABLE_NAME, Bytes.toBytes(region), - ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0); + Bytes.toBytes("r" + 999), FAMILY, QUALIFIER, VALUE, 0); writer.close(); try { @@ -1206,8 +1206,8 @@ public class TestWALSplit { int prefix = 0; for (String region : REGIONS) { String row_key = region + prefix++ + i + j; - appendEntry(ws[i], TABLE_NAME, region.getBytes(), row_key.getBytes(), FAMILY, QUALIFIER, - VALUE, seq++); + appendEntry(ws[i], TABLE_NAME, Bytes.toBytes(region), Bytes.toBytes(row_key), FAMILY, + QUALIFIER, VALUE, seq++); if (numRegionEventsAdded < regionEvents) { numRegionEventsAdded ++; @@ -1233,7 +1233,7 @@ public class TestWALSplit { Path tdir = FSUtils.getTableDir(rootdir, table); @SuppressWarnings("deprecation") Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, - Bytes.toString(region.getBytes()))); + Bytes.toString(Bytes.toBytes(region)))); FileStatus[] files = fs.listStatus(editsdir, new PathFilter() { @Override public boolean accept(Path p) { @@ -1260,46 +1260,46 @@ public class TestWALSplit { in.close(); switch (corruption) { - case APPEND_GARBAGE: - fs.delete(path, false); - out = fs.create(path); - out.write(corrupted_bytes); - out.write("-----".getBytes()); - closeOrFlush(close, out); - break; - - case INSERT_GARBAGE_ON_FIRST_LINE: - fs.delete(path, false); - out = fs.create(path); - out.write(0); - out.write(corrupted_bytes); - closeOrFlush(close, out); - break; - - case INSERT_GARBAGE_IN_THE_MIDDLE: - fs.delete(path, false); - out = fs.create(path); - int middle = (int) Math.floor(corrupted_bytes.length / 2); - out.write(corrupted_bytes, 0, middle); - out.write(0); - out.write(corrupted_bytes, middle, corrupted_bytes.length - middle); - closeOrFlush(close, out); - break; - - case TRUNCATE: - fs.delete(path, false); - out = fs.create(path); - out.write(corrupted_bytes, 0, fileSize - - (32 + ProtobufLogReader.PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT)); - closeOrFlush(close, out); - break; - - case TRUNCATE_TRAILER: - fs.delete(path, false); - out = fs.create(path); - out.write(corrupted_bytes, 0, fileSize - Bytes.SIZEOF_INT);// trailer is truncated. 
- closeOrFlush(close, out); - break; + case APPEND_GARBAGE: + fs.delete(path, false); + out = fs.create(path); + out.write(corrupted_bytes); + out.write(Bytes.toBytes("-----")); + closeOrFlush(close, out); + break; + + case INSERT_GARBAGE_ON_FIRST_LINE: + fs.delete(path, false); + out = fs.create(path); + out.write(0); + out.write(corrupted_bytes); + closeOrFlush(close, out); + break; + + case INSERT_GARBAGE_IN_THE_MIDDLE: + fs.delete(path, false); + out = fs.create(path); + int middle = (int) Math.floor(corrupted_bytes.length / 2); + out.write(corrupted_bytes, 0, middle); + out.write(0); + out.write(corrupted_bytes, middle, corrupted_bytes.length - middle); + closeOrFlush(close, out); + break; + + case TRUNCATE: + fs.delete(path, false); + out = fs.create(path); + out.write(corrupted_bytes, 0, fileSize + - (32 + ProtobufLogReader.PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT)); + closeOrFlush(close, out); + break; + + case TRUNCATE_TRAILER: + fs.delete(path, false); + out = fs.create(path); + out.write(corrupted_bytes, 0, fileSize - Bytes.SIZEOF_INT);// trailer is truncated. + closeOrFlush(close, out); + break; } } @@ -1360,14 +1360,14 @@ public class TestWALSplit { WALProtos.RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( WALProtos.RegionEventDescriptor.EventType.REGION_OPEN, TABLE_NAME.toBytes(), - region.getBytes(), - String.valueOf(region.hashCode()).getBytes(), + Bytes.toBytes(region), + Bytes.toBytes(String.valueOf(region.hashCode())), 1, ServerName.parseServerName("ServerName:9099"), ImmutableMap.<byte[], Long>of()); final long time = EnvironmentEdgeManager.currentTime(); - KeyValue kv = new KeyValue(region.getBytes(), WALEdit.METAFAMILY, WALEdit.REGION_EVENT, + KeyValue kv = new KeyValue(Bytes.toBytes(region), WALEdit.METAFAMILY, WALEdit.REGION_EVENT, time, regionOpenDesc.toByteArray()); - final WALKeyImpl walKey = new WALKeyImpl(region.getBytes(), TABLE_NAME, 1, time, + final WALKeyImpl walKey = new WALKeyImpl(Bytes.toBytes(region), TABLE_NAME, 1, time, HConstants.DEFAULT_CLUSTER_ID); w.append( new Entry(walKey, new WALEdit().add(kv))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java index 844cb3a27e..400d12b309 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitBoundedLogWriterCreation.java @@ -35,6 +35,7 @@ public class TestWALSplitBoundedLogWriterCreation extends TestWALSplit{ /** * The logic of this test has conflict with the limit writers split logic, skip this test */ + @Override @Test(timeout=300000) @Ignore public void testThreadingSlowWriterSmallBuffer() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java index 4e67b9171d..3e9e650b37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java @@ -129,10 +129,10 @@ public class TestZooKeeperACL { List acls = zkw.getRecoverableZooKeeper().getZooKeeper() .getACL("/hbase", new Stat()); - assertEquals(acls.size(),1); - assertEquals(acls.get(0).getId().getScheme(),"sasl"); - assertEquals(acls.get(0).getId().getId(),"hbase"); -
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
index 4e67b9171d..3e9e650b37 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
@@ -129,10 +129,10 @@ public class TestZooKeeperACL {
     List<ACL> acls = zkw.getRecoverableZooKeeper().getZooKeeper()
         .getACL("/hbase", new Stat());
-    assertEquals(acls.size(),1);
-    assertEquals(acls.get(0).getId().getScheme(),"sasl");
-    assertEquals(acls.get(0).getId().getId(),"hbase");
-    assertEquals(acls.get(0).getPerms(), ZooDefs.Perms.ALL);
+    assertEquals(1, acls.size());
+    assertEquals("sasl", acls.get(0).getId().getScheme());
+    assertEquals("hbase", acls.get(0).getId().getId());
+    assertEquals(ZooDefs.Perms.ALL, acls.get(0).getPerms());
   }
 
   /**
@@ -148,20 +148,20 @@ public class TestZooKeeperACL {
     List<ACL> acls = zkw.getRecoverableZooKeeper().getZooKeeper()
         .getACL("/hbase/root-region-server", new Stat());
-    assertEquals(acls.size(),2);
+    assertEquals(2, acls.size());
 
     boolean foundWorldReadableAcl = false;
     boolean foundHBaseOwnerAcl = false;
     for(int i = 0; i < 2; i++) {
       if (acls.get(i).getId().getScheme().equals("world") == true) {
-        assertEquals(acls.get(0).getId().getId(),"anyone");
-        assertEquals(acls.get(0).getPerms(), ZooDefs.Perms.READ);
+        assertEquals("anyone", acls.get(0).getId().getId());
+        assertEquals(ZooDefs.Perms.READ, acls.get(0).getPerms());
         foundWorldReadableAcl = true;
       } else {
         if (acls.get(i).getId().getScheme().equals("sasl") == true) {
-          assertEquals(acls.get(1).getId().getId(),"hbase");
-          assertEquals(acls.get(1).getId().getScheme(),"sasl");
+          assertEquals("hbase", acls.get(1).getId().getId());
+          assertEquals("sasl", acls.get(1).getId().getScheme());
           foundHBaseOwnerAcl = true;
         } else { // error: should not get here: test fails.
           assertTrue(false);
@@ -185,19 +185,19 @@ public class TestZooKeeperACL {
     List<ACL> acls = zkw.getRecoverableZooKeeper().getZooKeeper()
         .getACL("/hbase/master", new Stat());
-    assertEquals(acls.size(),2);
+    assertEquals(2, acls.size());
 
     boolean foundWorldReadableAcl = false;
     boolean foundHBaseOwnerAcl = false;
     for(int i = 0; i < 2; i++) {
       if (acls.get(i).getId().getScheme().equals("world") == true) {
-        assertEquals(acls.get(0).getId().getId(),"anyone");
-        assertEquals(acls.get(0).getPerms(), ZooDefs.Perms.READ);
+        assertEquals("anyone", acls.get(0).getId().getId());
+        assertEquals(ZooDefs.Perms.READ, acls.get(0).getPerms());
         foundWorldReadableAcl = true;
       } else {
         if (acls.get(i).getId().getScheme().equals("sasl") == true) {
-          assertEquals(acls.get(1).getId().getId(),"hbase");
-          assertEquals(acls.get(1).getId().getScheme(),"sasl");
+          assertEquals("hbase", acls.get(1).getId().getId());
+          assertEquals("sasl", acls.get(1).getId().getScheme());
           foundHBaseOwnerAcl = true;
         } else { // error: should not get here: test fails.
           assertTrue(false);
@@ -221,19 +221,19 @@ public class TestZooKeeperACL {
     List<ACL> acls = zkw.getRecoverableZooKeeper().getZooKeeper()
         .getACL("/hbase/hbaseid", new Stat());
-    assertEquals(acls.size(),2);
+    assertEquals(2, acls.size());
 
     boolean foundWorldReadableAcl = false;
     boolean foundHBaseOwnerAcl = false;
     for(int i = 0; i < 2; i++) {
       if (acls.get(i).getId().getScheme().equals("world") == true) {
-        assertEquals(acls.get(0).getId().getId(),"anyone");
-        assertEquals(acls.get(0).getPerms(), ZooDefs.Perms.READ);
+        assertEquals("anyone", acls.get(0).getId().getId());
+        assertEquals(ZooDefs.Perms.READ, acls.get(0).getPerms());
         foundWorldReadableAcl = true;
       } else {
         if (acls.get(i).getId().getScheme().equals("sasl") == true) {
-          assertEquals(acls.get(1).getId().getId(),"hbase");
-          assertEquals(acls.get(1).getId().getScheme(),"sasl");
+          assertEquals("hbase", acls.get(1).getId().getId());
+          assertEquals("sasl", acls.get(1).getId().getScheme());
           foundHBaseOwnerAcl = true;
         } else { // error: should not get here: test fails.
           assertTrue(false);
@@ -257,10 +257,10 @@ public class TestZooKeeperACL {
     ZKUtil.createWithParents(zkw, "/testACLNode");
     List<ACL> acls = zkw.getRecoverableZooKeeper().getZooKeeper()
         .getACL("/testACLNode", new Stat());
-    assertEquals(acls.size(),1);
-    assertEquals(acls.get(0).getId().getScheme(),"sasl");
-    assertEquals(acls.get(0).getId().getId(),"hbase");
-    assertEquals(acls.get(0).getPerms(), ZooDefs.Perms.ALL);
+    assertEquals(1, acls.size());
+    assertEquals("sasl", acls.get(0).getId().getScheme());
+    assertEquals("hbase", acls.get(0).getId().getId());
+    assertEquals(ZooDefs.Perms.ALL, acls.get(0).getPerms());
   }
 
   /**
@@ -281,7 +281,7 @@ public class TestZooKeeperACL {
         saslConfFile.getAbsolutePath());
 
     testJaasConfig = ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration()));
-    assertEquals(testJaasConfig, false);
+    assertEquals(false, testJaasConfig);
     saslConfFile.delete();
   }
 
@@ -295,13 +295,13 @@ public class TestZooKeeperACL {
     Configuration config = new Configuration(HBaseConfiguration.create());
     boolean testJaasConfig = ZKUtil.isSecureZooKeeper(config);
-    assertEquals(testJaasConfig, false);
+    assertEquals(false, testJaasConfig);
 
     // Now set authentication scheme to Kerberos still it should return false
     // because no configuration set
     config.set("hbase.security.authentication", "kerberos");
     testJaasConfig = ZKUtil.isSecureZooKeeper(config);
-    assertEquals(testJaasConfig, false);
+    assertEquals(false, testJaasConfig);
 
     // Now set programmatic options related to security
     config.set(HConstants.ZK_CLIENT_KEYTAB_FILE, "/dummy/file");
-- 
2.15.1
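The bulk of the assertEquals changes in this patch reorder arguments to (expected, actual), matching JUnit's contract; with the arguments swapped, a failure message blames the wrong side. A small self-contained illustration (AssertOrderDemo and corruptDirCount are illustrative, not code from the patch):

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertFalse;

    import org.junit.Test;

    public class AssertOrderDemo {
      private int corruptDirCount() {
        return 0; // stands in for a computed value like fs.listStatus(CORRUPTDIR).length
      }

      @Test
      public void expectedComesFirst() {
        // If corruptDirCount() ever returned 3, the swapped call
        //   assertEquals(corruptDirCount(), 0);
        // would fail with "expected:<3> but was:<0>", blaming the constant.
        // With the expected value first, the report reads the right way around:
        assertEquals(0, corruptDirCount());
      }

      @Test
      public void booleansReadBetterWithAssertFalse() {
        boolean secure = false;
        // assertEquals(false, secure) works, as in the patch, but assertFalse
        // states the intent directly:
        assertFalse(secure);
      }
    }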